Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 88
-rw-r--r--  drivers/net/Makefile | 3
-rw-r--r--  drivers/net/acenic.c | 6
-rw-r--r-- [-rwxr-xr-x]  drivers/net/amd8111e.c | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/net/amd8111e.h | 0
-rw-r--r--  drivers/net/arm/am79c961a.c | 42
-rw-r--r--  drivers/net/arm/am79c961a.h | 2
-rw-r--r--  drivers/net/au1000_eth.c | 6
-rw-r--r--  drivers/net/b44.c | 28
-rw-r--r--  drivers/net/bmac.c | 7
-rw-r--r--  drivers/net/bnx2.c | 12
-rw-r--r--  drivers/net/depca.c | 2
-rw-r--r--  drivers/net/dm9000.c | 9
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 7
-rw-r--r--  drivers/net/e1000/e1000_main.c | 9
-rw-r--r--  drivers/net/eepro.c | 57
-rw-r--r--  drivers/net/fec_8xx/Kconfig | 8
-rw-r--r--  drivers/net/fec_8xx/fec_mii.c | 42
-rw-r--r--  drivers/net/fs_enet/Kconfig | 20
-rw-r--r--  drivers/net/fs_enet/Makefile | 10
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 1229
-rw-r--r--  drivers/net/fs_enet/fs_enet-mii.c | 507
-rw-r--r--  drivers/net/fs_enet/fs_enet.h | 245
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 578
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 653
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 524
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 405
-rw-r--r--  drivers/net/fs_enet/mii-fixed.c | 92
-rw-r--r--  drivers/net/gianfar.c | 2
-rw-r--r--  drivers/net/gianfar_mii.c | 1
-rw-r--r--  drivers/net/hamradio/mkiss.c | 6
-rw-r--r--  drivers/net/ibm_emac/Makefile | 13
-rw-r--r--  drivers/net/ibm_emac/ibm_emac.h | 428
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_core.c | 3400
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_core.h | 313
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_debug.c | 363
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_debug.h | 63
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_mal.c | 674
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_mal.h | 333
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_phy.c | 347
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_phy.h | 105
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_rgmii.c | 201
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_rgmii.h | 60
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_tah.c | 111
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_tah.h | 96
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_zmii.c | 255
-rw-r--r--  drivers/net/ibm_emac/ibm_emac_zmii.h | 104
-rw-r--r--  drivers/net/ibmveth.c | 200
-rw-r--r--  drivers/net/ibmveth.h | 23
-rw-r--r--  drivers/net/irda/Kconfig | 10
-rw-r--r--  drivers/net/irda/Makefile | 1
-rw-r--r--  drivers/net/irda/donauboe.c | 6
-rw-r--r--  drivers/net/irda/irda-usb.c | 6
-rw-r--r--  drivers/net/irda/irport.c | 3
-rw-r--r--  drivers/net/irda/pxaficp_ir.c | 866
-rw-r--r--  drivers/net/irda/sa1100_ir.c | 10
-rw-r--r--  drivers/net/irda/sir_dev.c | 3
-rw-r--r--  drivers/net/irda/smsc-ircc2.c | 140
-rw-r--r--  drivers/net/irda/vlsi_ir.c | 3
-rw-r--r--  drivers/net/iseries_veth.c | 26
-rw-r--r--  drivers/net/jazzsonic.c | 2
-rw-r--r--  drivers/net/lasi_82596.c | 30
-rw-r--r--  drivers/net/mace.c | 7
-rw-r--r--  drivers/net/macsonic.c | 2
-rw-r--r--  drivers/net/mipsnet.c | 1
-rw-r--r--  drivers/net/mv643xx_eth.c | 5
-rw-r--r--  drivers/net/ne2k-pci.c | 1
-rw-r--r--  drivers/net/ni65.c | 9
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 6
-rw-r--r--  drivers/net/pcnet32.c | 87
-rw-r--r--  drivers/net/phy/mdio_bus.c | 23
-rw-r--r--  drivers/net/ppp_generic.c | 2
-rw-r--r--  drivers/net/rrunner.c | 6
-rw-r--r--  drivers/net/s2io.c | 753
-rw-r--r--  drivers/net/s2io.h | 91
-rw-r--r--  drivers/net/saa9730.c | 8
-rw-r--r--  drivers/net/sis190.c | 2
-rw-r--r--  drivers/net/sis900.c | 16
-rw-r--r--  drivers/net/skfp/smt.c | 2
-rw-r--r--  drivers/net/smc91x.c | 14
-rw-r--r--  drivers/net/smc91x.h | 12
-rw-r--r--  drivers/net/starfire.c | 4
-rw-r--r--  drivers/net/sundance.c | 62
-rw-r--r--  drivers/net/tg3.c | 91
-rw-r--r--  drivers/net/tg3.h | 12
-rw-r--r--  drivers/net/tokenring/proteon.c | 1
-rw-r--r--  drivers/net/tokenring/skisa.c | 1
-rw-r--r--  drivers/net/tulip/de2104x.c | 6
-rw-r--r--  drivers/net/tulip/tulip_core.c | 6
-rw-r--r--  drivers/net/via-velocity.c | 6
-rw-r--r--  drivers/net/wan/cosa.c | 2
-rw-r--r--  drivers/net/wireless/airo.c | 57
-rw-r--r--  drivers/net/wireless/airo_cs.c | 4
-rw-r--r--  drivers/net/wireless/atmel.c | 6
-rw-r--r--  drivers/net/wireless/atmel_cs.c | 3
-rw-r--r--  drivers/net/wireless/hermes.c | 38
-rw-r--r--  drivers/net/wireless/hermes.h | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 9
-rw-r--r--  drivers/net/wireless/ipw2200.c | 4
-rw-r--r--  drivers/net/wireless/orinoco.c | 13
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 3
-rw-r--r--  drivers/net/wireless/prism54/islpci_eth.c | 13
-rw-r--r--  drivers/net/wireless/prism54/islpci_mgt.c | 2
-rw-r--r--  drivers/net/wireless/prism54/oid_mgt.c | 9
-rw-r--r--  drivers/net/wireless/strip.c | 38
105 files changed, 10496 insertions, 3738 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b39cba36d15e..e2d5b77764a2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1163,38 +1163,74 @@ config IBMVETH
1163 be called ibmveth. 1163 be called ibmveth.
1164 1164
1165config IBM_EMAC 1165config IBM_EMAC
1166 bool "IBM PPC4xx EMAC driver support" 1166 tristate "PowerPC 4xx on-chip Ethernet support"
1167 depends on 4xx 1167 depends on 4xx
1168 select CRC32 1168 help
1169 ---help--- 1169 This driver supports the PowerPC 4xx EMAC family of on-chip
1170 This driver supports the IBM PPC4xx EMAC family of on-chip 1170 Ethernet controllers.
1171 Ethernet controllers.
1172
1173config IBM_EMAC_ERRMSG
1174 bool "Verbose error messages"
1175 depends on IBM_EMAC && BROKEN
1176 1171
1177config IBM_EMAC_RXB 1172config IBM_EMAC_RXB
1178 int "Number of receive buffers" 1173 int "Number of receive buffers"
1179 depends on IBM_EMAC 1174 depends on IBM_EMAC
1180 default "128" if IBM_EMAC4 1175 default "128"
1181 default "64"
1182 1176
1183config IBM_EMAC_TXB 1177config IBM_EMAC_TXB
1184 int "Number of transmit buffers" 1178 int "Number of transmit buffers"
1185 depends on IBM_EMAC 1179 depends on IBM_EMAC
1186 default "128" if IBM_EMAC4 1180 default "64"
1187 default "8"
1188 1181
1189config IBM_EMAC_FGAP 1182config IBM_EMAC_POLL_WEIGHT
1190 int "Frame gap" 1183 int "MAL NAPI polling weight"
1191 depends on IBM_EMAC 1184 depends on IBM_EMAC
1192 default "8" 1185 default "32"
1193 1186
1194config IBM_EMAC_SKBRES 1187config IBM_EMAC_RX_COPY_THRESHOLD
1195 int "Skb reserve amount" 1188 int "RX skb copy threshold (bytes)"
1189 depends on IBM_EMAC
1190 default "256"
1191
1192config IBM_EMAC_RX_SKB_HEADROOM
1193 int "Additional RX skb headroom (bytes)"
1196 depends on IBM_EMAC 1194 depends on IBM_EMAC
1197 default "0" 1195 default "0"
1196 help
 1197	  Additional receive skb headroom. Note that the driver
 1198	  will always reserve at least 2 bytes to make the IP header
 1199	  aligned, so usually there is no need to add any additional
 1200	  headroom.
1201
1202 If unsure, set to 0.
1203
1204config IBM_EMAC_PHY_RX_CLK_FIX
1205 bool "PHY Rx clock workaround"
1206 depends on IBM_EMAC && (405EP || 440GX || 440EP || 440GR)
1207 help
 1208	  Enable this if the EMAC is attached to a PHY which doesn't
 1209	  generate an RX clock when there is no link; in that case you
 1210	  will see "TX disable timeout" or "RX disable timeout" in the
 1211	  system log.
1212
1213 If unsure, say N.
1214
1215config IBM_EMAC_DEBUG
1216 bool "Debugging"
1217 depends on IBM_EMAC
1218 default n
1219
1220config IBM_EMAC_ZMII
1221 bool
1222 depends on IBM_EMAC && (NP405H || NP405L || 44x)
1223 default y
1224
1225config IBM_EMAC_RGMII
1226 bool
1227 depends on IBM_EMAC && 440GX
1228 default y
1229
1230config IBM_EMAC_TAH
1231 bool
1232 depends on IBM_EMAC && 440GX
1233 default y
1198 1234
1199config NET_PCI 1235config NET_PCI
1200 bool "EISA, VLB, PCI and on board controllers" 1236 bool "EISA, VLB, PCI and on board controllers"
@@ -1775,6 +1811,7 @@ config NE_H8300
1775 controller on the Renesas H8/300 processor. 1811 controller on the Renesas H8/300 processor.
1776 1812
1777source "drivers/net/fec_8xx/Kconfig" 1813source "drivers/net/fec_8xx/Kconfig"
1814source "drivers/net/fs_enet/Kconfig"
1778 1815
1779endmenu 1816endmenu
1780 1817
@@ -2219,8 +2256,8 @@ config S2IO
2219 depends on PCI 2256 depends on PCI
2220 ---help--- 2257 ---help---
2221 This driver supports the 10Gbe XFrame NIC of S2IO. 2258 This driver supports the 10Gbe XFrame NIC of S2IO.
2222 For help regarding driver compilation, installation and 2259 More specific information on configuring the driver is in
2223 tuning please look into ~/drivers/net/s2io/README.txt. 2260 <file:Documentation/networking/s2io.txt>.
2224 2261
2225config S2IO_NAPI 2262config S2IO_NAPI
2226 bool "Use Rx Polling (NAPI) (EXPERIMENTAL)" 2263 bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
@@ -2239,17 +2276,6 @@ config S2IO_NAPI
2239 2276
2240 If in doubt, say N. 2277 If in doubt, say N.
2241 2278
2242config 2BUFF_MODE
2243 bool "Use 2 Buffer Mode on Rx side."
2244 depends on S2IO
2245 ---help---
2246 On enabling the 2 buffer mode, the received frame will be
2247 split into 2 parts before being DMA'ed to the hosts memory.
2248 The parts are the ethernet header and ethernet payload.
2249 This is useful on systems where DMA'ing to to unaligned
2250 physical memory loactions comes with a heavy price.
2251 If not sure please say N.
2252
2253endmenu 2279endmenu
2254 2280
2255if !UML 2281if !UML
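
The IBM_EMAC_RX_SKB_HEADROOM help text above refers to the common 2-byte
reserve trick: the 14-byte Ethernet header would otherwise leave the IP
header on a half-word boundary, so receive paths shift the skb data
pointer by two bytes before the frame is copied in. A minimal sketch of
the idiom (the surrounding function is hypothetical; dev_alloc_skb() and
skb_reserve() are the standard helpers):

    #include <linux/skbuff.h>

    static struct sk_buff *rx_alloc_aligned(unsigned int len)
    {
            /* +2 so that 14 bytes of Ethernet header plus 2 bytes of
             * padding land the IP header on a 4-byte boundary. */
            struct sk_buff *skb = dev_alloc_skb(len + 2);

            if (skb)
                    skb_reserve(skb, 2);    /* NET_IP_ALIGN-style shift */
            return skb;
    }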
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8b3403c7f5e0..5dccac434d48 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -204,3 +204,6 @@ obj-$(CONFIG_IRDA) += irda/
204obj-$(CONFIG_ETRAX_ETHERNET) += cris/ 204obj-$(CONFIG_ETRAX_ETHERNET) += cris/
205 205
206obj-$(CONFIG_NETCONSOLE) += netconsole.o 206obj-$(CONFIG_NETCONSOLE) += netconsole.o
207
208obj-$(CONFIG_FS_ENET) += fs_enet/
209
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index dbecc6bf7851..b8953de5664a 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -871,10 +871,8 @@ static void ace_init_cleanup(struct net_device *dev)
871 if (ap->info) 871 if (ap->info)
872 pci_free_consistent(ap->pdev, sizeof(struct ace_info), 872 pci_free_consistent(ap->pdev, sizeof(struct ace_info),
873 ap->info, ap->info_dma); 873 ap->info, ap->info_dma);
874 if (ap->skb) 874 kfree(ap->skb);
875 kfree(ap->skb); 875 kfree(ap->trace_buf);
876 if (ap->trace_buf)
877 kfree(ap->trace_buf);
878 876
879 if (dev->irq) 877 if (dev->irq)
880 free_irq(dev->irq, dev); 878 free_irq(dev->irq, dev);
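
The acenic hunk above (and the similar au1000_eth, b44, bmac, bnx2,
e1000_ethtool and s2io cleanups in this series) relies on kfree(NULL)
being defined as a no-op, which makes the NULL check before the call
redundant. A minimal sketch of the pattern, using a hypothetical
structure and cleanup function:

    #include <linux/slab.h>

    struct rings {
            void *rx_buf;
            void *tx_buf;
    };

    static void rings_free(struct rings *r)
    {
            /* No "if (r->rx_buf)" needed: kfree(NULL) returns
             * immediately. Clearing the pointers afterwards keeps the
             * function safe to call twice. */
            kfree(r->rx_buf);
            r->rx_buf = NULL;
            kfree(r->tx_buf);
            r->tx_buf = NULL;
    }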
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index d9ba8be72af8..d9ba8be72af8 100755..100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index cfe3a4298822..cfe3a4298822 100755..100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index c56d86d371a9..877891a29aaa 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -26,10 +26,11 @@
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/crc32.h> 27#include <linux/crc32.h>
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29#include <linux/platform_device.h>
29 30
30#include <asm/system.h> 31#include <asm/hardware.h>
31#include <asm/irq.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/system.h>
33 34
34#define TX_BUFFERS 15 35#define TX_BUFFERS 15
35#define RX_BUFFERS 25 36#define RX_BUFFERS 25
@@ -279,10 +280,13 @@ static void am79c961_timer(unsigned long data)
279 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST; 280 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
280 carrier = netif_carrier_ok(dev); 281 carrier = netif_carrier_ok(dev);
281 282
282 if (lnkstat && !carrier) 283 if (lnkstat && !carrier) {
283 netif_carrier_on(dev); 284 netif_carrier_on(dev);
284 else if (!lnkstat && carrier) 285 printk("%s: link up\n", dev->name);
286 } else if (!lnkstat && carrier) {
285 netif_carrier_off(dev); 287 netif_carrier_off(dev);
288 printk("%s: link down\n", dev->name);
289 }
286 290
287 mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500)); 291 mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
288} 292}
@@ -664,17 +668,25 @@ static void __init am79c961_banner(void)
664 printk(KERN_INFO "%s", version); 668 printk(KERN_INFO "%s", version);
665} 669}
666 670
667static int __init am79c961_init(void) 671static int __init am79c961_probe(struct device *_dev)
668{ 672{
673 struct platform_device *pdev = to_platform_device(_dev);
674 struct resource *res;
669 struct net_device *dev; 675 struct net_device *dev;
670 struct dev_priv *priv; 676 struct dev_priv *priv;
671 int i, ret; 677 int i, ret;
672 678
679 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
680 if (!res)
681 return -ENODEV;
682
673 dev = alloc_etherdev(sizeof(struct dev_priv)); 683 dev = alloc_etherdev(sizeof(struct dev_priv));
674 ret = -ENOMEM; 684 ret = -ENOMEM;
675 if (!dev) 685 if (!dev)
676 goto out; 686 goto out;
677 687
688 SET_NETDEV_DEV(dev, &pdev->dev);
689
678 priv = netdev_priv(dev); 690 priv = netdev_priv(dev);
679 691
680 /* 692 /*
@@ -682,8 +694,8 @@ static int __init am79c961_init(void)
682 * The PNP initialisation should have been 694 * The PNP initialisation should have been
683 * done by the ether bootp loader. 695 * done by the ether bootp loader.
684 */ 696 */
685 dev->base_addr = 0x220; 697 dev->base_addr = res->start;
686 dev->irq = IRQ_EBSA110_ETHERNET; 698 dev->irq = platform_get_irq(pdev, 0);
687 699
688 ret = -ENODEV; 700 ret = -ENODEV;
689 if (!request_region(dev->base_addr, 0x18, dev->name)) 701 if (!request_region(dev->base_addr, 0x18, dev->name))
@@ -704,11 +716,11 @@ static int __init am79c961_init(void)
704 inb(dev->base_addr + 4) != 0x2b) 716 inb(dev->base_addr + 4) != 0x2b)
705 goto release; 717 goto release;
706 718
707 am79c961_banner();
708
709 for (i = 0; i < 6; i++) 719 for (i = 0; i < 6; i++)
710 dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff; 720 dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
711 721
722 am79c961_banner();
723
712 spin_lock_init(&priv->chip_lock); 724 spin_lock_init(&priv->chip_lock);
713 init_timer(&priv->timer); 725 init_timer(&priv->timer);
714 priv->timer.data = (unsigned long)dev; 726 priv->timer.data = (unsigned long)dev;
@@ -731,6 +743,7 @@ static int __init am79c961_init(void)
731 if (ret == 0) { 743 if (ret == 0) {
732 printk(KERN_INFO "%s: ether address ", dev->name); 744 printk(KERN_INFO "%s: ether address ", dev->name);
733 745
 746 /* Retrieve and print the ethernet address. */
734 for (i = 0; i < 6; i++) 747 for (i = 0; i < 6; i++)
735 printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]); 748 printk (i == 5 ? "%02x\n" : "%02x:", dev->dev_addr[i]);
736 749
@@ -745,4 +758,15 @@ out:
745 return ret; 758 return ret;
746} 759}
747 760
761static struct device_driver am79c961_driver = {
762 .name = "am79c961",
763 .bus = &platform_bus_type,
764 .probe = am79c961_probe,
765};
766
767static int __init am79c961_init(void)
768{
769 return driver_register(&am79c961_driver);
770}
771
748__initcall(am79c961_init); 772__initcall(am79c961_init);
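
The am79c961a conversion above replaces the hard-coded I/O base (0x220)
and IRQ with values fetched from platform resources, and registers the
probe routine on the platform bus. The other half of that contract lives
in board code; a hypothetical sketch of the matching platform device,
with the numbers taken from what the old driver hard-coded
(IRQ_EBSA110_ETHERNET comes from the machine's asm/irq.h):

    #include <linux/platform_device.h>
    #include <linux/ioport.h>

    static struct resource am79c961_resources[] = {
            {
                    .start = 0x220,                 /* request_region(base, 0x18) */
                    .end   = 0x220 + 0x18 - 1,
                    .flags = IORESOURCE_IO,
            }, {
                    .start = IRQ_EBSA110_ETHERNET,
                    .end   = IRQ_EBSA110_ETHERNET,
                    .flags = IORESOURCE_IRQ,
            },
    };

    static struct platform_device am79c961_device = {
            .name          = "am79c961",    /* must match the driver's .name */
            .id            = -1,
            .num_resources = ARRAY_SIZE(am79c961_resources),
            .resource      = am79c961_resources,
    };

    /* Registered from board init with platform_device_register(). */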
diff --git a/drivers/net/arm/am79c961a.h b/drivers/net/arm/am79c961a.h
index 1e9b05050cbe..6a49ac7f6d46 100644
--- a/drivers/net/arm/am79c961a.h
+++ b/drivers/net/arm/am79c961a.h
@@ -143,6 +143,4 @@ struct dev_priv {
143 struct timer_list timer; 143 struct timer_list timer;
144}; 144};
145 145
146extern int am79c961_probe (struct net_device *dev);
147
148#endif 146#endif
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 78506911d656..332e9953c55c 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1606,8 +1606,7 @@ err_out:
1606 /* here we should have a valid dev plus aup-> register addresses 1606 /* here we should have a valid dev plus aup-> register addresses
1607 * so we can reset the mac properly.*/ 1607 * so we can reset the mac properly.*/
1608 reset_mac(dev); 1608 reset_mac(dev);
1609 if (aup->mii) 1609 kfree(aup->mii);
1610 kfree(aup->mii);
1611 for (i = 0; i < NUM_RX_DMA; i++) { 1610 for (i = 0; i < NUM_RX_DMA; i++) {
1612 if (aup->rx_db_inuse[i]) 1611 if (aup->rx_db_inuse[i])
1613 ReleaseDB(aup, aup->rx_db_inuse[i]); 1612 ReleaseDB(aup, aup->rx_db_inuse[i]);
@@ -1806,8 +1805,7 @@ static void __exit au1000_cleanup_module(void)
1806 if (dev) { 1805 if (dev) {
1807 aup = (struct au1000_private *) dev->priv; 1806 aup = (struct au1000_private *) dev->priv;
1808 unregister_netdev(dev); 1807 unregister_netdev(dev);
1809 if (aup->mii) 1808 kfree(aup->mii);
1810 kfree(aup->mii);
1811 for (j = 0; j < NUM_RX_DMA; j++) { 1809 for (j = 0; j < NUM_RX_DMA; j++) {
1812 if (aup->rx_db_inuse[j]) 1810 if (aup->rx_db_inuse[j])
1813 ReleaseDB(aup, aup->rx_db_inuse[j]); 1811 ReleaseDB(aup, aup->rx_db_inuse[j]);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 282ebd15f011..0ee3e27969c6 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -19,6 +19,7 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/version.h> 21#include <linux/version.h>
22#include <linux/dma-mapping.h>
22 23
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24#include <asm/io.h> 25#include <asm/io.h>
@@ -1130,14 +1131,10 @@ static void b44_init_rings(struct b44 *bp)
1130 */ 1131 */
1131static void b44_free_consistent(struct b44 *bp) 1132static void b44_free_consistent(struct b44 *bp)
1132{ 1133{
1133 if (bp->rx_buffers) { 1134 kfree(bp->rx_buffers);
1134 kfree(bp->rx_buffers); 1135 bp->rx_buffers = NULL;
1135 bp->rx_buffers = NULL; 1136 kfree(bp->tx_buffers);
1136 } 1137 bp->tx_buffers = NULL;
1137 if (bp->tx_buffers) {
1138 kfree(bp->tx_buffers);
1139 bp->tx_buffers = NULL;
1140 }
1141 if (bp->rx_ring) { 1138 if (bp->rx_ring) {
1142 if (bp->flags & B44_FLAG_RX_RING_HACK) { 1139 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1143 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma, 1140 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
@@ -1619,14 +1616,14 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1619 1616
1620 cmd->advertising = 0; 1617 cmd->advertising = 0;
1621 if (bp->flags & B44_FLAG_ADV_10HALF) 1618 if (bp->flags & B44_FLAG_ADV_10HALF)
1622 cmd->advertising |= ADVERTISE_10HALF; 1619 cmd->advertising |= ADVERTISED_10baseT_Half;
1623 if (bp->flags & B44_FLAG_ADV_10FULL) 1620 if (bp->flags & B44_FLAG_ADV_10FULL)
1624 cmd->advertising |= ADVERTISE_10FULL; 1621 cmd->advertising |= ADVERTISED_10baseT_Full;
1625 if (bp->flags & B44_FLAG_ADV_100HALF) 1622 if (bp->flags & B44_FLAG_ADV_100HALF)
1626 cmd->advertising |= ADVERTISE_100HALF; 1623 cmd->advertising |= ADVERTISED_100baseT_Half;
1627 if (bp->flags & B44_FLAG_ADV_100FULL) 1624 if (bp->flags & B44_FLAG_ADV_100FULL)
1628 cmd->advertising |= ADVERTISE_100FULL; 1625 cmd->advertising |= ADVERTISED_100baseT_Full;
1629 cmd->advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1626 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1630 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ? 1627 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1631 SPEED_100 : SPEED_10; 1628 SPEED_100 : SPEED_10;
1632 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? 1629 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
@@ -2044,6 +2041,8 @@ static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2044 b44_free_rings(bp); 2041 b44_free_rings(bp);
2045 2042
2046 spin_unlock_irq(&bp->lock); 2043 spin_unlock_irq(&bp->lock);
2044
2045 free_irq(dev->irq, dev);
2047 pci_disable_device(pdev); 2046 pci_disable_device(pdev);
2048 return 0; 2047 return 0;
2049} 2048}
@@ -2060,6 +2059,9 @@ static int b44_resume(struct pci_dev *pdev)
2060 if (!netif_running(dev)) 2059 if (!netif_running(dev))
2061 return 0; 2060 return 0;
2062 2061
2062 if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2063 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2064
2063 spin_lock_irq(&bp->lock); 2065 spin_lock_irq(&bp->lock);
2064 2066
2065 b44_init_rings(bp); 2067 b44_init_rings(bp);
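
The b44 ethtool hunk fixes a constant mix-up: ADVERTISE_* (from
linux/mii.h) are bit positions in the MII advertisement register, while
ADVERTISED_* (from linux/ethtool.h) are bits of ethtool_cmd.advertising,
and the two do not line up (ADVERTISE_10HALF is 0x0020 but
ADVERTISED_10baseT_Half is 0x0001), so the old code reported garbage
through the ethtool interface. A sketch of an explicit translation,
with a hypothetical helper name:

    #include <linux/mii.h>
    #include <linux/ethtool.h>

    static u32 mii_adv_to_ethtool(u16 adv)
    {
            u32 out = 0;

            if (adv & ADVERTISE_10HALF)
                    out |= ADVERTISED_10baseT_Half;
            if (adv & ADVERTISE_10FULL)
                    out |= ADVERTISED_10baseT_Full;
            if (adv & ADVERTISE_100HALF)
                    out |= ADVERTISED_100baseT_Half;
            if (adv & ADVERTISE_100FULL)
                    out |= ADVERTISED_100baseT_Full;
            return out;
    }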
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 60dba4a1ca5c..bbca8ae8018c 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1658,6 +1658,7 @@ static struct of_device_id bmac_match[] =
1658 }, 1658 },
1659 {}, 1659 {},
1660}; 1660};
1661MODULE_DEVICE_TABLE (of, bmac_match);
1661 1662
1662static struct macio_driver bmac_driver = 1663static struct macio_driver bmac_driver =
1663{ 1664{
@@ -1689,10 +1690,8 @@ static void __exit bmac_exit(void)
1689{ 1690{
1690 macio_unregister_driver(&bmac_driver); 1691 macio_unregister_driver(&bmac_driver);
1691 1692
1692 if (bmac_emergency_rxbuf != NULL) { 1693 kfree(bmac_emergency_rxbuf);
1693 kfree(bmac_emergency_rxbuf); 1694 bmac_emergency_rxbuf = NULL;
1694 bmac_emergency_rxbuf = NULL;
1695 }
1696} 1695}
1697 1696
1698MODULE_AUTHOR("Randy Gobbel/Paul Mackerras"); 1697MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
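
The one-line MODULE_DEVICE_TABLE addition in the bmac hunk exports the
driver's match table into a module alias section, which depmod and the
hotplug tools read to autoload the module when a matching device turns
up. A hypothetical minimal example of the same idiom (struct
of_device_id came from the PowerPC of_device headers of this era):

    #include <linux/module.h>
    #include <asm/of_device.h>

    static struct of_device_id mydrv_match[] = {
            { .name = "mydev" },
            {},                             /* table must be terminated */
    };
    MODULE_DEVICE_TABLE(of, mydrv_match);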
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 3a2ace01e444..11d252318221 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -314,20 +314,16 @@ bnx2_free_mem(struct bnx2 *bp)
314 bp->tx_desc_ring, bp->tx_desc_mapping); 314 bp->tx_desc_ring, bp->tx_desc_mapping);
315 bp->tx_desc_ring = NULL; 315 bp->tx_desc_ring = NULL;
316 } 316 }
317 if (bp->tx_buf_ring) { 317 kfree(bp->tx_buf_ring);
318 kfree(bp->tx_buf_ring); 318 bp->tx_buf_ring = NULL;
319 bp->tx_buf_ring = NULL;
320 }
321 if (bp->rx_desc_ring) { 319 if (bp->rx_desc_ring) {
322 pci_free_consistent(bp->pdev, 320 pci_free_consistent(bp->pdev,
323 sizeof(struct rx_bd) * RX_DESC_CNT, 321 sizeof(struct rx_bd) * RX_DESC_CNT,
324 bp->rx_desc_ring, bp->rx_desc_mapping); 322 bp->rx_desc_ring, bp->rx_desc_mapping);
325 bp->rx_desc_ring = NULL; 323 bp->rx_desc_ring = NULL;
326 } 324 }
327 if (bp->rx_buf_ring) { 325 kfree(bp->rx_buf_ring);
328 kfree(bp->rx_buf_ring); 326 bp->rx_buf_ring = NULL;
329 bp->rx_buf_ring = NULL;
330 }
331} 327}
332 328
333static int 329static int
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index c4aa5fe2840e..4d26e5e7d18b 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -254,7 +254,7 @@
254#include <linux/unistd.h> 254#include <linux/unistd.h>
255#include <linux/ctype.h> 255#include <linux/ctype.h>
256#include <linux/moduleparam.h> 256#include <linux/moduleparam.h>
257#include <linux/device.h> 257#include <linux/platform_device.h>
258#include <linux/bitops.h> 258#include <linux/bitops.h>
259 259
260#include <asm/uaccess.h> 260#include <asm/uaccess.h>
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index e54fc10f6846..c0af6fb1fbba 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -66,6 +66,7 @@
66#include <linux/mii.h> 66#include <linux/mii.h>
67#include <linux/dm9000.h> 67#include <linux/dm9000.h>
68#include <linux/delay.h> 68#include <linux/delay.h>
69#include <linux/platform_device.h>
69 70
70#include <asm/delay.h> 71#include <asm/delay.h>
71#include <asm/irq.h> 72#include <asm/irq.h>
@@ -1140,11 +1141,11 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
1140} 1141}
1141 1142
1142static int 1143static int
1143dm9000_drv_suspend(struct device *dev, pm_message_t state, u32 level) 1144dm9000_drv_suspend(struct device *dev, pm_message_t state)
1144{ 1145{
1145 struct net_device *ndev = dev_get_drvdata(dev); 1146 struct net_device *ndev = dev_get_drvdata(dev);
1146 1147
1147 if (ndev && level == SUSPEND_DISABLE) { 1148 if (ndev) {
1148 if (netif_running(ndev)) { 1149 if (netif_running(ndev)) {
1149 netif_device_detach(ndev); 1150 netif_device_detach(ndev);
1150 dm9000_shutdown(ndev); 1151 dm9000_shutdown(ndev);
@@ -1154,12 +1155,12 @@ dm9000_drv_suspend(struct device *dev, pm_message_t state, u32 level)
1154} 1155}
1155 1156
1156static int 1157static int
1157dm9000_drv_resume(struct device *dev, u32 level) 1158dm9000_drv_resume(struct device *dev)
1158{ 1159{
1159 struct net_device *ndev = dev_get_drvdata(dev); 1160 struct net_device *ndev = dev_get_drvdata(dev);
1160 board_info_t *db = (board_info_t *) ndev->priv; 1161 board_info_t *db = (board_info_t *) ndev->priv;
1161 1162
1162 if (ndev && level == RESUME_ENABLE) { 1163 if (ndev) {
1163 1164
1164 if (netif_running(ndev)) { 1165 if (netif_running(ndev)) {
1165 dm9000_reset(db); 1166 dm9000_reset(db);
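
The dm9000 hunks track a driver-model change: bus suspend/resume
callbacks lost their u32 level argument (SUSPEND_DISABLE, RESUME_ENABLE
and friends), so each hook now runs exactly once per transition instead
of once per level. A sketch of the post-change callback shape for a
hypothetical platform-bus driver:

    #include <linux/device.h>
    #include <linux/platform_device.h>

    static int mydrv_suspend(struct device *dev, pm_message_t state)
    {
            /* Quiesce the hardware in one shot; no level phases. */
            return 0;
    }

    static int mydrv_resume(struct device *dev)
    {
            /* Bring the hardware back up. */
            return 0;
    }

    static struct device_driver mydrv_driver = {
            .name    = "mydrv",
            .bus     = &platform_bus_type,
            .suspend = mydrv_suspend,
            .resume  = mydrv_resume,
    };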
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 6b9acc7f94a3..9c7feaeaa6a4 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -965,11 +965,8 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
965 if(rxdr->desc) 965 if(rxdr->desc)
966 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); 966 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
967 967
968 if(txdr->buffer_info) 968 kfree(txdr->buffer_info);
969 kfree(txdr->buffer_info); 969 kfree(rxdr->buffer_info);
970 if(rxdr->buffer_info)
971 kfree(rxdr->buffer_info);
972
973 return; 970 return;
974} 971}
975 972
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6b72f6acdd54..efbbda7cbcbf 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -191,8 +191,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
191static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 191static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
192static void e1000_restore_vlan(struct e1000_adapter *adapter); 192static void e1000_restore_vlan(struct e1000_adapter *adapter);
193 193
194static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
195#ifdef CONFIG_PM 194#ifdef CONFIG_PM
195static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
196static int e1000_resume(struct pci_dev *pdev); 196static int e1000_resume(struct pci_dev *pdev);
197#endif 197#endif
198 198
@@ -1149,7 +1149,8 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
1149 int size; 1149 int size;
1150 1150
1151 size = sizeof(struct e1000_buffer) * txdr->count; 1151 size = sizeof(struct e1000_buffer) * txdr->count;
1152 txdr->buffer_info = vmalloc(size); 1152
1153 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1153 if(!txdr->buffer_info) { 1154 if(!txdr->buffer_info) {
1154 DPRINTK(PROBE, ERR, 1155 DPRINTK(PROBE, ERR,
1155 "Unable to allocate memory for the transmit descriptor ring\n"); 1156 "Unable to allocate memory for the transmit descriptor ring\n");
@@ -1366,7 +1367,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
1366 int size, desc_len; 1367 int size, desc_len;
1367 1368
1368 size = sizeof(struct e1000_buffer) * rxdr->count; 1369 size = sizeof(struct e1000_buffer) * rxdr->count;
1369 rxdr->buffer_info = vmalloc(size); 1370 rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1370 if (!rxdr->buffer_info) { 1371 if (!rxdr->buffer_info) {
1371 DPRINTK(PROBE, ERR, 1372 DPRINTK(PROBE, ERR,
1372 "Unable to allocate memory for the receive descriptor ring\n"); 1373 "Unable to allocate memory for the receive descriptor ring\n");
@@ -4193,6 +4194,7 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4193 return 0; 4194 return 0;
4194} 4195}
4195 4196
4197#ifdef CONFIG_PM
4196static int 4198static int
4197e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4199e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4198{ 4200{
@@ -4289,7 +4291,6 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4289 return 0; 4291 return 0;
4290} 4292}
4291 4293
4292#ifdef CONFIG_PM
4293static int 4294static int
4294e1000_resume(struct pci_dev *pdev) 4295e1000_resume(struct pci_dev *pdev)
4295{ 4296{
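
The e1000 setup hunks switch the descriptor bookkeeping arrays from
vmalloc() to vmalloc_node(), so the memory lands on the NUMA node
closest to the NIC's PCI bus rather than wherever the configuring CPU
happens to run. The idiom, sketched with an assumed pdev for the
adapter:

    #include <linux/vmalloc.h>
    #include <linux/pci.h>

    static void *alloc_ring_info(struct pci_dev *pdev, unsigned long size)
    {
            /* pcibus_to_node() maps the bus to its NUMA node; on
             * non-NUMA configurations this degenerates to a plain
             * vmalloc(). */
            return vmalloc_node(size, pcibus_to_node(pdev->bus));
    }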
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index dcb3028bb60f..a806dfe54d23 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -552,8 +552,7 @@ static int __init do_eepro_probe(struct net_device *dev)
552 { 552 {
553 unsigned short int WS[32]=WakeupSeq; 553 unsigned short int WS[32]=WakeupSeq;
554 554
555 if (check_region(WakeupPort, 2)==0) { 555 if (request_region(WakeupPort, 2, "eepro wakeup")) {
556
557 if (net_debug>5) 556 if (net_debug>5)
558 printk(KERN_DEBUG "Waking UP\n"); 557 printk(KERN_DEBUG "Waking UP\n");
559 558
@@ -563,7 +562,10 @@ static int __init do_eepro_probe(struct net_device *dev)
563 outb_p(WS[i],WakeupPort); 562 outb_p(WS[i],WakeupPort);
564 if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]); 563 if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]);
565 } 564 }
566 } else printk(KERN_WARNING "Checkregion Failed!\n"); 565
566 release_region(WakeupPort, 2);
567 } else
568 printk(KERN_WARNING "PnP wakeup region busy!\n");
567 } 569 }
568#endif 570#endif
569 571
@@ -705,7 +707,7 @@ static void __init eepro_print_info (struct net_device *dev)
705 dev->name, (unsigned)dev->base_addr); 707 dev->name, (unsigned)dev->base_addr);
706 break; 708 break;
707 case LAN595FX: 709 case LAN595FX:
708 printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,", 710 printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
709 dev->name, (unsigned)dev->base_addr); 711 dev->name, (unsigned)dev->base_addr);
710 break; 712 break;
711 case LAN595TX: 713 case LAN595TX:
@@ -713,7 +715,7 @@ static void __init eepro_print_info (struct net_device *dev)
713 dev->name, (unsigned)dev->base_addr); 715 dev->name, (unsigned)dev->base_addr);
714 break; 716 break;
715 case LAN595: 717 case LAN595:
716 printk("%s: Intel 82595-based lan card at %#x,", 718 printk("%s: Intel 82595-based lan card at %#x,",
717 dev->name, (unsigned)dev->base_addr); 719 dev->name, (unsigned)dev->base_addr);
718 } 720 }
719 721
@@ -726,7 +728,7 @@ static void __init eepro_print_info (struct net_device *dev)
726 728
727 if (dev->irq > 2) 729 if (dev->irq > 2)
728 printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]); 730 printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]);
729 else 731 else
730 printk(", %s.\n", ifmap[dev->if_port]); 732 printk(", %s.\n", ifmap[dev->if_port]);
731 733
732 if (net_debug > 3) { 734 if (net_debug > 3) {
@@ -756,7 +758,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
756 int err; 758 int err;
757 759
758 /* Grab the region so we can find another board if autoIRQ fails. */ 760 /* Grab the region so we can find another board if autoIRQ fails. */
759 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { 761 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
760 if (!autoprobe) 762 if (!autoprobe)
761 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n", 763 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n",
762 ioaddr); 764 ioaddr);
@@ -838,15 +840,15 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
838 /* Mask off INT number */ 840 /* Mask off INT number */
839 int count = lp->word[1] & 7; 841 int count = lp->word[1] & 7;
840 unsigned irqMask = lp->word[7]; 842 unsigned irqMask = lp->word[7];
841 843
842 while (count--) 844 while (count--)
843 irqMask &= irqMask - 1; 845 irqMask &= irqMask - 1;
844 846
845 count = ffs(irqMask); 847 count = ffs(irqMask);
846 848
847 if (count) 849 if (count)
848 dev->irq = count - 1; 850 dev->irq = count - 1;
849 851
850 if (dev->irq < 2) { 852 if (dev->irq < 2) {
851 printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n"); 853 printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n");
852 goto exit; 854 goto exit;
@@ -854,7 +856,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
854 dev->irq = 9; 856 dev->irq = 9;
855 } 857 }
856 } 858 }
857 859
858 dev->open = eepro_open; 860 dev->open = eepro_open;
859 dev->stop = eepro_close; 861 dev->stop = eepro_close;
860 dev->hard_start_xmit = eepro_send_packet; 862 dev->hard_start_xmit = eepro_send_packet;
@@ -863,7 +865,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
863 dev->tx_timeout = eepro_tx_timeout; 865 dev->tx_timeout = eepro_tx_timeout;
864 dev->watchdog_timeo = TX_TIMEOUT; 866 dev->watchdog_timeo = TX_TIMEOUT;
865 dev->ethtool_ops = &eepro_ethtool_ops; 867 dev->ethtool_ops = &eepro_ethtool_ops;
866 868
867 /* print boot time info */ 869 /* print boot time info */
868 eepro_print_info(dev); 870 eepro_print_info(dev);
869 871
@@ -1047,8 +1049,8 @@ static int eepro_open(struct net_device *dev)
1047 1049
1048 1050
1049 /* Initialize the RCV and XMT upper and lower limits */ 1051 /* Initialize the RCV and XMT upper and lower limits */
1050 outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG); 1052 outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG);
1051 outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG); 1053 outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG);
1052 outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg); 1054 outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg);
1053 outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg); 1055 outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg);
1054 1056
@@ -1065,12 +1067,12 @@ static int eepro_open(struct net_device *dev)
1065 eepro_clear_int(ioaddr); 1067 eepro_clear_int(ioaddr);
1066 1068
1067 /* Initialize RCV */ 1069 /* Initialize RCV */
1068 outw(lp->rcv_lower_limit, ioaddr + RCV_BAR); 1070 outw(lp->rcv_lower_limit, ioaddr + RCV_BAR);
1069 lp->rx_start = lp->rcv_lower_limit; 1071 lp->rx_start = lp->rcv_lower_limit;
1070 outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP); 1072 outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP);
1071 1073
1072 /* Initialize XMT */ 1074 /* Initialize XMT */
1073 outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar); 1075 outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar);
1074 lp->tx_start = lp->tx_end = lp->xmt_lower_limit; 1076 lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
1075 lp->tx_last = 0; 1077 lp->tx_last = 0;
1076 1078
@@ -1411,7 +1413,7 @@ set_multicast_list(struct net_device *dev)
1411 outb(0x08, ioaddr + STATUS_REG); 1413 outb(0x08, ioaddr + STATUS_REG);
1412 1414
1413 if (i & 0x20) { /* command ABORTed */ 1415 if (i & 0x20) { /* command ABORTed */
1414 printk(KERN_NOTICE "%s: multicast setup failed.\n", 1416 printk(KERN_NOTICE "%s: multicast setup failed.\n",
1415 dev->name); 1417 dev->name);
1416 break; 1418 break;
1417 } else if ((i & 0x0f) == 0x03) { /* MC-Done */ 1419 } else if ((i & 0x0f) == 0x03) { /* MC-Done */
@@ -1512,7 +1514,7 @@ hardware_send_packet(struct net_device *dev, void *buf, short length)
1512 end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; 1514 end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
1513 1515
1514 if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */ 1516 if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */
1515 if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) { 1517 if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) {
1516 /* Arrrr!!!, must keep the xmt header together, 1518 /* Arrrr!!!, must keep the xmt header together,
1517 several days were lost to chase this one down. */ 1519 several days were lost to chase this one down. */
1518 last = lp->xmt_lower_limit; 1520 last = lp->xmt_lower_limit;
@@ -1643,7 +1645,7 @@ eepro_rx(struct net_device *dev)
1643 else if (rcv_status & 0x0800) 1645 else if (rcv_status & 0x0800)
1644 lp->stats.rx_crc_errors++; 1646 lp->stats.rx_crc_errors++;
1645 1647
1646 printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", 1648 printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
1647 dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); 1649 dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
1648 } 1650 }
1649 1651
@@ -1674,10 +1676,10 @@ eepro_transmit_interrupt(struct net_device *dev)
1674{ 1676{
1675 struct eepro_local *lp = netdev_priv(dev); 1677 struct eepro_local *lp = netdev_priv(dev);
1676 short ioaddr = dev->base_addr; 1678 short ioaddr = dev->base_addr;
1677 short boguscount = 25; 1679 short boguscount = 25;
1678 short xmt_status; 1680 short xmt_status;
1679 1681
1680 while ((lp->tx_start != lp->tx_end) && boguscount--) { 1682 while ((lp->tx_start != lp->tx_end) && boguscount--) {
1681 1683
1682 outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG); 1684 outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
1683 xmt_status = inw(ioaddr+IO_PORT); 1685 xmt_status = inw(ioaddr+IO_PORT);
@@ -1723,7 +1725,7 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
1723{ 1725{
1724 struct eepro_local *lp = (struct eepro_local *)dev->priv; 1726 struct eepro_local *lp = (struct eepro_local *)dev->priv;
1725 1727
1726 cmd->supported = SUPPORTED_10baseT_Half | 1728 cmd->supported = SUPPORTED_10baseT_Half |
1727 SUPPORTED_10baseT_Full | 1729 SUPPORTED_10baseT_Full |
1728 SUPPORTED_Autoneg; 1730 SUPPORTED_Autoneg;
1729 cmd->advertising = ADVERTISED_10baseT_Half | 1731 cmd->advertising = ADVERTISED_10baseT_Half |
@@ -1797,10 +1799,9 @@ MODULE_AUTHOR("Pascal Dupuis and others");
1797MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver"); 1799MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
1798MODULE_LICENSE("GPL"); 1800MODULE_LICENSE("GPL");
1799 1801
1800static int num_params; 1802module_param_array(io, int, NULL, 0);
1801module_param_array(io, int, &num_params, 0); 1803module_param_array(irq, int, NULL, 0);
1802module_param_array(irq, int, &num_params, 0); 1804module_param_array(mem, int, NULL, 0);
1803module_param_array(mem, int, &num_params, 0);
1804module_param(autodetect, int, 0); 1805module_param(autodetect, int, 0);
1805MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)"); 1806MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
1806MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); 1807MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
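
The eepro wakeup hunk retires a racy check-then-use pattern:
check_region() only reports whether an I/O range is free at that
instant, so another driver could claim it before the outb() sequence
ran. request_region() actually reserves the range, and the region is
released once the wakeup sequence is done. A minimal sketch of the
corrected shape (the function name is hypothetical):

    #include <linux/types.h>
    #include <linux/ioport.h>
    #include <asm/io.h>

    static void wakeup_ports(unsigned int port, u8 val)
    {
            /* Holding the region for the duration of the access closes
             * the window that check_region() left open. */
            if (request_region(port, 2, "eepro wakeup")) {
                    outb(val, port);
                    release_region(port, 2);
            }
    }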
diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig
index db36ac3ea453..4560026ed419 100644
--- a/drivers/net/fec_8xx/Kconfig
+++ b/drivers/net/fec_8xx/Kconfig
@@ -1,6 +1,6 @@
1config FEC_8XX 1config FEC_8XX
2 tristate "Motorola 8xx FEC driver" 2 tristate "Motorola 8xx FEC driver"
3 depends on NET_ETHERNET && 8xx && (NETTA || NETPHONE) 3 depends on NET_ETHERNET
4 select MII 4 select MII
5 5
6config FEC_8XX_GENERIC_PHY 6config FEC_8XX_GENERIC_PHY
@@ -12,3 +12,9 @@ config FEC_8XX_DM9161_PHY
12 bool "Support DM9161 PHY" 12 bool "Support DM9161 PHY"
13 depends on FEC_8XX 13 depends on FEC_8XX
14 default n 14 default n
15
16config FEC_8XX_LXT971_PHY
17 bool "Support LXT971/LXT972 PHY"
18 depends on FEC_8XX
19 default n
20
diff --git a/drivers/net/fec_8xx/fec_mii.c b/drivers/net/fec_8xx/fec_mii.c
index 803eb095cf8e..3b44ac1a7bfe 100644
--- a/drivers/net/fec_8xx/fec_mii.c
+++ b/drivers/net/fec_8xx/fec_mii.c
@@ -203,6 +203,39 @@ static void dm9161_shutdown(struct net_device *dev)
203 203
204#endif 204#endif
205 205
206#ifdef CONFIG_FEC_8XX_LXT971_PHY
207
208/* Support for LXT971/972 PHY */
209
210#define MII_LXT971_PCR 16 /* Port Control Register */
211#define MII_LXT971_SR2 17 /* Status Register 2 */
212#define MII_LXT971_IER 18 /* Interrupt Enable Register */
213#define MII_LXT971_ISR 19 /* Interrupt Status Register */
214#define MII_LXT971_LCR 20 /* LED Control Register */
215#define MII_LXT971_TCR 30 /* Transmit Control Register */
216
217static void lxt971_startup(struct net_device *dev)
218{
219 struct fec_enet_private *fep = netdev_priv(dev);
220
221 fec_mii_write(dev, fep->mii_if.phy_id, MII_LXT971_IER, 0x00F2);
222}
223
224static void lxt971_ack_int(struct net_device *dev)
225{
226 struct fec_enet_private *fep = netdev_priv(dev);
227
228 fec_mii_read(dev, fep->mii_if.phy_id, MII_LXT971_ISR);
229}
230
231static void lxt971_shutdown(struct net_device *dev)
232{
233 struct fec_enet_private *fep = netdev_priv(dev);
234
235 fec_mii_write(dev, fep->mii_if.phy_id, MII_LXT971_IER, 0x0000);
236}
237#endif
238
206/**********************************************************************************/ 239/**********************************************************************************/
207 240
208static const struct phy_info phy_info[] = { 241static const struct phy_info phy_info[] = {
@@ -215,6 +248,15 @@ static const struct phy_info phy_info[] = {
215 .shutdown = dm9161_shutdown, 248 .shutdown = dm9161_shutdown,
216 }, 249 },
217#endif 250#endif
251#ifdef CONFIG_FEC_8XX_LXT971_PHY
252 {
253 .id = 0x0001378e,
254 .name = "LXT971/972",
255 .startup = lxt971_startup,
256 .ack_int = lxt971_ack_int,
257 .shutdown = lxt971_shutdown,
258 },
259#endif
218#ifdef CONFIG_FEC_8XX_GENERIC_PHY 260#ifdef CONFIG_FEC_8XX_GENERIC_PHY
219 { 261 {
220 .id = 0, 262 .id = 0,
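
The LXT971/972 support added above plugs into the driver's phy_info
dispatch table: each entry pairs a PHY identifier with
startup/ack_int/shutdown hooks, and the generic entry (id 0) stays last
so specific matches win. Acknowledging the LXT971 interrupt is just a
read of its interrupt status register (read-to-clear), which is why
lxt971_ack_int() discards the value. A condensed sketch of the table
pattern, with hypothetical type names and the handlers assumed to be
the ones defined in the hunk above:

    struct net_device;

    struct phy_hooks {
            unsigned int id;                        /* PHY id, 0 = match any */
            const char *name;
            void (*startup)(struct net_device *dev);
            void (*ack_int)(struct net_device *dev);
            void (*shutdown)(struct net_device *dev);
    };

    static const struct phy_hooks phy_table[] = {
            { 0x0001378e, "LXT971/972",
              lxt971_startup, lxt971_ack_int, lxt971_shutdown },
            { 0, "GENERIC", NULL, NULL, NULL },     /* must stay last */
    };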
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
new file mode 100644
index 000000000000..6aaee67dd4b7
--- /dev/null
+++ b/drivers/net/fs_enet/Kconfig
@@ -0,0 +1,20 @@
1config FS_ENET
2 tristate "Freescale Ethernet Driver"
3 depends on NET_ETHERNET && (CPM1 || CPM2)
4 select MII
5
6config FS_ENET_HAS_SCC
7 bool "Chip has an SCC usable for ethernet"
8 depends on FS_ENET && (CPM1 || CPM2)
9 default y
10
11config FS_ENET_HAS_FCC
12 bool "Chip has an FCC usable for ethernet"
13 depends on FS_ENET && CPM2
14 default y
15
16config FS_ENET_HAS_FEC
17 bool "Chip has an FEC usable for ethernet"
18 depends on FS_ENET && CPM1
19 default y
20
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
new file mode 100644
index 000000000000..d6dd3f2fb43e
--- /dev/null
+++ b/drivers/net/fs_enet/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the Freescale Ethernet controllers
3#
4
5obj-$(CONFIG_FS_ENET) += fs_enet.o
6
7obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o
8obj-$(CONFIG_8260) += mac-fcc.o
9
10fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o
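
The new Makefile uses kbuild's composite-object convention:
obj-$(CONFIG_FS_ENET) names the module, the matching fs_enet-objs line
lists the objects linked into it, and the one-line hook added to
drivers/net/Makefile earlier makes kbuild descend into the directory at
all. The same pattern for a hypothetical two-file module:

    # drivers/net/mydrv/Makefile
    obj-$(CONFIG_MYDRV) += mydrv.o
    mydrv-objs := mydrv-main.o mydrv-phy.o

    # parent drivers/net/Makefile
    obj-$(CONFIG_MYDRV) += mydrv/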
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
new file mode 100644
index 000000000000..9342d5bc7bb4
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -0,0 +1,1229 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/ptrace.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/delay.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/spinlock.h>
36#include <linux/mii.h>
37#include <linux/ethtool.h>
38#include <linux/bitops.h>
39#include <linux/fs.h>
40
41#include <linux/vmalloc.h>
42#include <asm/pgtable.h>
43
44#include <asm/pgtable.h>
45#include <asm/irq.h>
46#include <asm/uaccess.h>
47
48#include "fs_enet.h"
49
50/*************************************************/
51
52static char version[] __devinitdata =
53 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
54
55MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
56MODULE_DESCRIPTION("Freescale Ethernet Driver");
57MODULE_LICENSE("GPL");
58MODULE_VERSION(DRV_MODULE_VERSION);
59
60MODULE_PARM(fs_enet_debug, "i");
61MODULE_PARM_DESC(fs_enet_debug,
62 "Freescale bitmapped debugging message enable value");
63
64int fs_enet_debug = -1; /* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
65
66static void fs_set_multicast_list(struct net_device *dev)
67{
68 struct fs_enet_private *fep = netdev_priv(dev);
69
70 (*fep->ops->set_multicast_list)(dev);
71}
72
73/* NAPI receive function */
74static int fs_enet_rx_napi(struct net_device *dev, int *budget)
75{
76 struct fs_enet_private *fep = netdev_priv(dev);
77 const struct fs_platform_info *fpi = fep->fpi;
78 cbd_t *bdp;
79 struct sk_buff *skb, *skbn, *skbt;
80 int received = 0;
81 u16 pkt_len, sc;
82 int curidx;
83 int rx_work_limit = 0; /* pacify gcc */
84
85 rx_work_limit = min(dev->quota, *budget);
86
87 if (!netif_running(dev))
88 return 0;
89
90 /*
91 * First, grab all of the stats for the incoming packet.
92 * These get messed up if we get called due to a busy condition.
93 */
94 bdp = fep->cur_rx;
95
96 /* clear RX status bits for napi*/
97 (*fep->ops->napi_clear_rx_event)(dev);
98
99 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
100
101 curidx = bdp - fep->rx_bd_base;
102
103 /*
104 * Since we have allocated space to hold a complete frame,
105 * the last indicator should be set.
106 */
107 if ((sc & BD_ENET_RX_LAST) == 0)
108 printk(KERN_WARNING DRV_MODULE_NAME
109 ": %s rcv is not +last\n",
110 dev->name);
111
112 /*
113 * Check for errors.
114 */
115 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
116 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
117 fep->stats.rx_errors++;
118 /* Frame too long or too short. */
119 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
120 fep->stats.rx_length_errors++;
121 /* Frame alignment */
122 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
123 fep->stats.rx_frame_errors++;
124 /* CRC Error */
125 if (sc & BD_ENET_RX_CR)
126 fep->stats.rx_crc_errors++;
127 /* FIFO overrun */
128 if (sc & BD_ENET_RX_OV)
129 fep->stats.rx_crc_errors++;
130
131 skb = fep->rx_skbuff[curidx];
132
133 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
134 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
135 DMA_FROM_DEVICE);
136
137 skbn = skb;
138
139 } else {
140
141 /* napi, got packet but no quota */
142 if (--rx_work_limit < 0)
143 break;
144
145 skb = fep->rx_skbuff[curidx];
146
147 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
148 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
149 DMA_FROM_DEVICE);
150
151 /*
152 * Process the incoming frame.
153 */
154 fep->stats.rx_packets++;
155 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
156 fep->stats.rx_bytes += pkt_len + 4;
157
158 if (pkt_len <= fpi->rx_copybreak) {
159 /* +2 to make IP header L1 cache aligned */
160 skbn = dev_alloc_skb(pkt_len + 2);
161 if (skbn != NULL) {
162 skb_reserve(skbn, 2); /* align IP header */
163 memcpy(skbn->data, skb->data, pkt_len);
164 /* swap */
165 skbt = skb;
166 skb = skbn;
167 skbn = skbt;
168 }
169 } else
170 skbn = dev_alloc_skb(ENET_RX_FRSIZE);
171
172 if (skbn != NULL) {
173 skb->dev = dev;
174 skb_put(skb, pkt_len); /* Make room */
175 skb->protocol = eth_type_trans(skb, dev);
176 received++;
177 netif_receive_skb(skb);
178 } else {
179 printk(KERN_WARNING DRV_MODULE_NAME
180 ": %s Memory squeeze, dropping packet.\n",
181 dev->name);
182 fep->stats.rx_dropped++;
183 skbn = skb;
184 }
185 }
186
187 fep->rx_skbuff[curidx] = skbn;
188 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
189 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
190 DMA_FROM_DEVICE));
191 CBDW_DATLEN(bdp, 0);
192 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
193
194 /*
195 * Update BD pointer to next entry.
196 */
197 if ((sc & BD_ENET_RX_WRAP) == 0)
198 bdp++;
199 else
200 bdp = fep->rx_bd_base;
201
202 (*fep->ops->rx_bd_done)(dev);
203 }
204
205 fep->cur_rx = bdp;
206
207 dev->quota -= received;
208 *budget -= received;
209
210 if (rx_work_limit < 0)
211 return 1; /* not done */
212
213 /* done */
214 netif_rx_complete(dev);
215
216 (*fep->ops->napi_enable_rx)(dev);
217
218 return 0;
219}
220
221/* non NAPI receive function */
222static int fs_enet_rx_non_napi(struct net_device *dev)
223{
224 struct fs_enet_private *fep = netdev_priv(dev);
225 const struct fs_platform_info *fpi = fep->fpi;
226 cbd_t *bdp;
227 struct sk_buff *skb, *skbn, *skbt;
228 int received = 0;
229 u16 pkt_len, sc;
230 int curidx;
231 /*
232 * First, grab all of the stats for the incoming packet.
233 * These get messed up if we get called due to a busy condition.
234 */
235 bdp = fep->cur_rx;
236
237 while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
238
239 curidx = bdp - fep->rx_bd_base;
240
241 /*
242 * Since we have allocated space to hold a complete frame,
243 * the last indicator should be set.
244 */
245 if ((sc & BD_ENET_RX_LAST) == 0)
246 printk(KERN_WARNING DRV_MODULE_NAME
247 ": %s rcv is not +last\n",
248 dev->name);
249
250 /*
251 * Check for errors.
252 */
253 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
254 BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
255 fep->stats.rx_errors++;
256 /* Frame too long or too short. */
257 if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
258 fep->stats.rx_length_errors++;
259 /* Frame alignment */
260 if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
261 fep->stats.rx_frame_errors++;
262 /* CRC Error */
263 if (sc & BD_ENET_RX_CR)
264 fep->stats.rx_crc_errors++;
265 /* FIFO overrun */
266 if (sc & BD_ENET_RX_OV)
267 fep->stats.rx_crc_errors++;
268
269 skb = fep->rx_skbuff[curidx];
270
271 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
272 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
273 DMA_FROM_DEVICE);
274
275 skbn = skb;
276
277 } else {
278
279 skb = fep->rx_skbuff[curidx];
280
281 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
282 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
283 DMA_FROM_DEVICE);
284
285 /*
286 * Process the incoming frame.
287 */
288 fep->stats.rx_packets++;
289 pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
290 fep->stats.rx_bytes += pkt_len + 4;
291
292 if (pkt_len <= fpi->rx_copybreak) {
293 /* +2 to make IP header L1 cache aligned */
294 skbn = dev_alloc_skb(pkt_len + 2);
295 if (skbn != NULL) {
296 skb_reserve(skbn, 2); /* align IP header */
297 memcpy(skbn->data, skb->data, pkt_len);
298 /* swap */
299 skbt = skb;
300 skb = skbn;
301 skbn = skbt;
302 }
303 } else
304 skbn = dev_alloc_skb(ENET_RX_FRSIZE);
305
306 if (skbn != NULL) {
307 skb->dev = dev;
308 skb_put(skb, pkt_len); /* Make room */
309 skb->protocol = eth_type_trans(skb, dev);
310 received++;
311 netif_rx(skb);
312 } else {
313 printk(KERN_WARNING DRV_MODULE_NAME
314 ": %s Memory squeeze, dropping packet.\n",
315 dev->name);
316 fep->stats.rx_dropped++;
317 skbn = skb;
318 }
319 }
320
321 fep->rx_skbuff[curidx] = skbn;
322 CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
323 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
324 DMA_FROM_DEVICE));
325 CBDW_DATLEN(bdp, 0);
326 CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
327
328 /*
329 * Update BD pointer to next entry.
330 */
331 if ((sc & BD_ENET_RX_WRAP) == 0)
332 bdp++;
333 else
334 bdp = fep->rx_bd_base;
335
336 (*fep->ops->rx_bd_done)(dev);
337 }
338
339 fep->cur_rx = bdp;
340
341 return 0;
342}
343
344static void fs_enet_tx(struct net_device *dev)
345{
346 struct fs_enet_private *fep = netdev_priv(dev);
347 cbd_t *bdp;
348 struct sk_buff *skb;
349 int dirtyidx, do_wake, do_restart;
350 u16 sc;
351
352 spin_lock(&fep->lock);
353 bdp = fep->dirty_tx;
354
355 do_wake = do_restart = 0;
356 while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
357
358 dirtyidx = bdp - fep->tx_bd_base;
359
360 if (fep->tx_free == fep->tx_ring)
361 break;
362
363 skb = fep->tx_skbuff[dirtyidx];
364
365 /*
366 * Check for errors.
367 */
368 if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
369 BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
370
371 if (sc & BD_ENET_TX_HB) /* No heartbeat */
372 fep->stats.tx_heartbeat_errors++;
373 if (sc & BD_ENET_TX_LC) /* Late collision */
374 fep->stats.tx_window_errors++;
375 if (sc & BD_ENET_TX_RL) /* Retrans limit */
376 fep->stats.tx_aborted_errors++;
377 if (sc & BD_ENET_TX_UN) /* Underrun */
378 fep->stats.tx_fifo_errors++;
379 if (sc & BD_ENET_TX_CSL) /* Carrier lost */
380 fep->stats.tx_carrier_errors++;
381
382 if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
383 fep->stats.tx_errors++;
384 do_restart = 1;
385 }
386 } else
387 fep->stats.tx_packets++;
388
389 if (sc & BD_ENET_TX_READY)
390 printk(KERN_WARNING DRV_MODULE_NAME
391 ": %s HEY! Enet xmit interrupt and TX_READY.\n",
392 dev->name);
393
394 /*
395 * Deferred means some collisions occurred during transmit,
396 * but we eventually sent the packet OK.
397 */
398 if (sc & BD_ENET_TX_DEF)
399 fep->stats.collisions++;
400
401 /* unmap */
402 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
403 skb->len, DMA_TO_DEVICE);
404
405 /*
406 * Free the sk buffer associated with this last transmit.
407 */
408 dev_kfree_skb_irq(skb);
409 fep->tx_skbuff[dirtyidx] = NULL;
410
411 /*
412 * Update pointer to next buffer descriptor to be transmitted.
413 */
414 if ((sc & BD_ENET_TX_WRAP) == 0)
415 bdp++;
416 else
417 bdp = fep->tx_bd_base;
418
419 /*
420 * Since we have freed up a buffer, the ring is no longer
421 * full.
422 */
423 if (!fep->tx_free++)
424 do_wake = 1;
425 }
426
427 fep->dirty_tx = bdp;
428
429 if (do_restart)
430 (*fep->ops->tx_restart)(dev);
431
432 spin_unlock(&fep->lock);
433
434 if (do_wake)
435 netif_wake_queue(dev);
436}
437
438/*
439 * The interrupt handler.
440 * This is called from the MPC core interrupt.
441 */
442static irqreturn_t
443fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
444{
445 struct net_device *dev = dev_id;
446 struct fs_enet_private *fep;
447 const struct fs_platform_info *fpi;
448 u32 int_events;
449 u32 int_clr_events;
450 int nr, napi_ok;
451 int handled;
452
453 fep = netdev_priv(dev);
454 fpi = fep->fpi;
455
456 nr = 0;
457 while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
458
459 nr++;
460
461 int_clr_events = int_events;
462 if (fpi->use_napi)
463 int_clr_events &= ~fep->ev_napi_rx;
464
465 (*fep->ops->clear_int_events)(dev, int_clr_events);
466
467 if (int_events & fep->ev_err)
468 (*fep->ops->ev_error)(dev, int_events);
469
470 if (int_events & fep->ev_rx) {
471 if (!fpi->use_napi)
472 fs_enet_rx_non_napi(dev);
473 else {
474 napi_ok = netif_rx_schedule_prep(dev);
475
476 (*fep->ops->napi_disable_rx)(dev);
477 (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
478
479 /* NOTE: it is possible for FCCs in NAPI mode */
480 /* to submit a spurious interrupt while in poll */
481 if (napi_ok)
482 __netif_rx_schedule(dev);
483 }
484 }
485
486 if (int_events & fep->ev_tx)
487 fs_enet_tx(dev);
488 }
489
490 handled = nr > 0;
491 return IRQ_RETVAL(handled);
492}
493
494void fs_init_bds(struct net_device *dev)
495{
496 struct fs_enet_private *fep = netdev_priv(dev);
497 cbd_t *bdp;
498 struct sk_buff *skb;
499 int i;
500
501 fs_cleanup_bds(dev);
502
503 fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
504 fep->tx_free = fep->tx_ring;
505 fep->cur_rx = fep->rx_bd_base;
506
507 /*
508 * Initialize the receive buffer descriptors.
509 */
510 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
511 skb = dev_alloc_skb(ENET_RX_FRSIZE);
512 if (skb == NULL) {
513 printk(KERN_WARNING DRV_MODULE_NAME
514 ": %s Memory squeeze, unable to allocate skb\n",
515 dev->name);
516 break;
517 }
518 fep->rx_skbuff[i] = skb;
519 skb->dev = dev;
520 CBDW_BUFADDR(bdp,
521 dma_map_single(fep->dev, skb->data,
522 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
523 DMA_FROM_DEVICE));
524 CBDW_DATLEN(bdp, 0); /* zero */
525 CBDW_SC(bdp, BD_ENET_RX_EMPTY |
526 ((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
527 }
528 /*
 529 * if we failed, fill up the remainder
530 */
531 for (; i < fep->rx_ring; i++, bdp++) {
532 fep->rx_skbuff[i] = NULL;
533 CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
534 }
535
536 /*
537 * ...and the same for transmit.
538 */
539 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
540 fep->tx_skbuff[i] = NULL;
541 CBDW_BUFADDR(bdp, 0);
542 CBDW_DATLEN(bdp, 0);
543 CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
544 }
545}
546
547void fs_cleanup_bds(struct net_device *dev)
548{
549 struct fs_enet_private *fep = netdev_priv(dev);
550 struct sk_buff *skb;
551 cbd_t *bdp;
552 int i;
553
554 /*
555 * Reset SKB transmit buffers.
556 */
557 for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
558 if ((skb = fep->tx_skbuff[i]) == NULL)
559 continue;
560
561 /* unmap */
562 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
563 skb->len, DMA_TO_DEVICE);
564
565 fep->tx_skbuff[i] = NULL;
566 dev_kfree_skb(skb);
567 }
568
569 /*
570 * Reset SKB receive buffers
571 */
572 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
573 if ((skb = fep->rx_skbuff[i]) == NULL)
574 continue;
575
576 /* unmap */
577 dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
578 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
579 DMA_FROM_DEVICE);
580
581 fep->rx_skbuff[i] = NULL;
582
583 dev_kfree_skb(skb);
584 }
585}
586
587/**********************************************************************************/
588
589static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
590{
591 struct fs_enet_private *fep = netdev_priv(dev);
592 cbd_t *bdp;
593 int curidx;
594 u16 sc;
595 unsigned long flags;
596
597 spin_lock_irqsave(&fep->tx_lock, flags);
598
599 /*
600 * Fill in a Tx ring entry
601 */
602 bdp = fep->cur_tx;
603
604 if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
605 netif_stop_queue(dev);
606 spin_unlock_irqrestore(&fep->tx_lock, flags);
607
608 /*
609 * Ooops. All transmit buffers are full. Bail out.
610 * This should not happen, since the tx queue should be stopped.
611 */
612 printk(KERN_WARNING DRV_MODULE_NAME
613 ": %s tx queue full!.\n", dev->name);
614 return NETDEV_TX_BUSY;
615 }
616
617 curidx = bdp - fep->tx_bd_base;
618 /*
619 * Clear all of the status flags.
620 */
621 CBDC_SC(bdp, BD_ENET_TX_STATS);
622
623 /*
624 * Save skb pointer.
625 */
626 fep->tx_skbuff[curidx] = skb;
627
628 fep->stats.tx_bytes += skb->len;
629
630 /*
631 * Push the data cache so the CPM does not get stale memory data.
632 */
633 CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
634 skb->data, skb->len, DMA_TO_DEVICE));
635 CBDW_DATLEN(bdp, skb->len);
636
637 dev->trans_start = jiffies;
638
639 /*
640 * If this was the last BD in the ring, start at the beginning again.
641 */
642 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
643 fep->cur_tx++;
644 else
645 fep->cur_tx = fep->tx_bd_base;
646
647 if (!--fep->tx_free)
648 netif_stop_queue(dev);
649
650 /* Trigger transmission start */
651 sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
652 BD_ENET_TX_LAST | BD_ENET_TX_TC;
653
654 /* Note that while the FEC does not have this bit,
655 * it is documented as available for software use --
656 * handy hardware reuse; see the note after this function. */
657 if (skb->len <= 60)
658 sc |= BD_ENET_TX_PAD;
659 CBDS_SC(bdp, sc);
660
661 (*fep->ops->tx_kickstart)(dev);
662
663 spin_unlock_irqrestore(&fep->tx_lock, flags);
664
665 return NETDEV_TX_OK;
666}
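/*
 * Why the 60-byte threshold above: the Ethernet minimum frame is 64
 * octets including the 4-octet FCS, and the FCS is appended by
 * hardware, so the driver-visible frame must reach 64 - 4 = 60 octets.
 * Equivalently, as a sketch using the generic ETH_ZLEN constant from
 * <linux/if_ether.h> (not what the driver literally writes):
 *
 *	if (skb->len <= ETH_ZLEN)	// ETH_ZLEN == 60
 *		sc |= BD_ENET_TX_PAD;
 */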
667
668static int fs_request_irq(struct net_device *dev, int irq, const char *name,
669 irqreturn_t (*irqf)(int irq, void *dev_id, struct pt_regs *regs))
670{
671 struct fs_enet_private *fep = netdev_priv(dev);
672
673 (*fep->ops->pre_request_irq)(dev, irq);
674 return request_irq(irq, irqf, SA_SHIRQ, name, dev);
675}
676
677static void fs_free_irq(struct net_device *dev, int irq)
678{
679 struct fs_enet_private *fep = netdev_priv(dev);
680
681 free_irq(irq, dev);
682 (*fep->ops->post_free_irq)(dev, irq);
683}
684
685/**********************************************************************************/
686
687/* This interrupt occurs when the PHY detects a link change. */
688static irqreturn_t
689fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
690{
691 struct net_device *dev = dev_id;
692 struct fs_enet_private *fep;
693 const struct fs_platform_info *fpi;
694
695 fep = netdev_priv(dev);
696 fpi = fep->fpi;
697
698 /*
699 * Acknowledge the interrupt if possible. If we have not
700 * found the PHY yet we can't process or acknowledge the
701 * interrupt now. Instead we ignore this interrupt for now,
702 * which we can do since it is edge triggered. It will be
703 * acknowledged later by fs_enet_open().
704 */
705 if (!fep->phy)
706 return IRQ_NONE;
707
708 fs_mii_ack_int(dev);
709 fs_mii_link_status_change_check(dev, 0);
710
711 return IRQ_HANDLED;
712}
713
714static void fs_timeout(struct net_device *dev)
715{
716 struct fs_enet_private *fep = netdev_priv(dev);
717 unsigned long flags;
718 int wake = 0;
719
720 fep->stats.tx_errors++;
721
722 spin_lock_irqsave(&fep->lock, flags);
723
724 if (dev->flags & IFF_UP) {
725 (*fep->ops->stop)(dev);
726 (*fep->ops->restart)(dev);
727 }
728
729 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
730 spin_unlock_irqrestore(&fep->lock, flags);
731
732 if (wake)
733 netif_wake_queue(dev);
734}
735
736static int fs_enet_open(struct net_device *dev)
737{
738 struct fs_enet_private *fep = netdev_priv(dev);
739 const struct fs_platform_info *fpi = fep->fpi;
740 int r;
741
742 /* Install our interrupt handler. */
743 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
744 if (r != 0) {
745 printk(KERN_ERR DRV_MODULE_NAME
746 ": %s Could not allocate FEC IRQ!", dev->name);
747 return -EINVAL;
748 }
749
750 /* Install our phy interrupt handler */
751 if (fpi->phy_irq != -1) {
752
753 r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
754 if (r != 0) {
755 printk(KERN_ERR DRV_MODULE_NAME
756 ": %s Could not allocate PHY IRQ!", dev->name);
757 fs_free_irq(dev, fep->interrupt);
758 return -EINVAL;
759 }
760 }
761
762 fs_mii_startup(dev);
763 netif_carrier_off(dev);
764 fs_mii_link_status_change_check(dev, 1);
765
766 return 0;
767}
768
769static int fs_enet_close(struct net_device *dev)
770{
771 struct fs_enet_private *fep = netdev_priv(dev);
772 const struct fs_platform_info *fpi = fep->fpi;
773 unsigned long flags;
774
775 netif_stop_queue(dev);
776 netif_carrier_off(dev);
777 fs_mii_shutdown(dev);
778
779 spin_lock_irqsave(&fep->lock, flags);
780 (*fep->ops->stop)(dev);
781 spin_unlock_irqrestore(&fep->lock, flags);
782
783 /* release any irqs */
784 if (fpi->phy_irq != -1)
785 fs_free_irq(dev, fpi->phy_irq);
786 fs_free_irq(dev, fep->interrupt);
787
788 return 0;
789}
790
791static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
792{
793 struct fs_enet_private *fep = netdev_priv(dev);
794 return &fep->stats;
795}
796
797/*************************************************************************/
798
799static void fs_get_drvinfo(struct net_device *dev,
800 struct ethtool_drvinfo *info)
801{
802 strcpy(info->driver, DRV_MODULE_NAME);
803 strcpy(info->version, DRV_MODULE_VERSION);
804}
805
806static int fs_get_regs_len(struct net_device *dev)
807{
808 struct fs_enet_private *fep = netdev_priv(dev);
809
810 return (*fep->ops->get_regs_len)(dev);
811}
812
813static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
814 void *p)
815{
816 struct fs_enet_private *fep = netdev_priv(dev);
817 unsigned long flags;
818 int r, len;
819
820 len = regs->len;
821
822 spin_lock_irqsave(&fep->lock, flags);
823 r = (*fep->ops->get_regs)(dev, p, &len);
824 spin_unlock_irqrestore(&fep->lock, flags);
825
826 if (r == 0)
827 regs->version = 0;
828}
829
830static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
831{
832 struct fs_enet_private *fep = netdev_priv(dev);
833 unsigned long flags;
834 int rc;
835
836 spin_lock_irqsave(&fep->lock, flags);
837 rc = mii_ethtool_gset(&fep->mii_if, cmd);
838 spin_unlock_irqrestore(&fep->lock, flags);
839
840 return rc;
841}
842
843static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
844{
845 struct fs_enet_private *fep = netdev_priv(dev);
846 unsigned long flags;
847 int rc;
848
849 spin_lock_irqsave(&fep->lock, flags);
850 rc = mii_ethtool_sset(&fep->mii_if, cmd);
851 spin_unlock_irqrestore(&fep->lock, flags);
852
853 return rc;
854}
855
856static int fs_nway_reset(struct net_device *dev)
857{
858 struct fs_enet_private *fep = netdev_priv(dev);
859 return mii_nway_restart(&fep->mii_if);
860}
861
862static u32 fs_get_msglevel(struct net_device *dev)
863{
864 struct fs_enet_private *fep = netdev_priv(dev);
865 return fep->msg_enable;
866}
867
868static void fs_set_msglevel(struct net_device *dev, u32 value)
869{
870 struct fs_enet_private *fep = netdev_priv(dev);
871 fep->msg_enable = value;
872}
873
874static struct ethtool_ops fs_ethtool_ops = {
875 .get_drvinfo = fs_get_drvinfo,
876 .get_regs_len = fs_get_regs_len,
877 .get_settings = fs_get_settings,
878 .set_settings = fs_set_settings,
879 .nway_reset = fs_nway_reset,
880 .get_link = ethtool_op_get_link,
881 .get_msglevel = fs_get_msglevel,
882 .set_msglevel = fs_set_msglevel,
883 .get_tx_csum = ethtool_op_get_tx_csum,
884 .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
885 .get_sg = ethtool_op_get_sg,
886 .set_sg = ethtool_op_set_sg,
887 .get_regs = fs_get_regs,
888};
889
890static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
891{
892 struct fs_enet_private *fep = netdev_priv(dev);
893 struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
894 unsigned long flags;
895 int rc;
896
897 if (!netif_running(dev))
898 return -EINVAL;
899
900 spin_lock_irqsave(&fep->lock, flags);
901 rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
902 spin_unlock_irqrestore(&fep->lock, flags);
903 return rc;
904}
905
906extern int fs_mii_connect(struct net_device *dev);
907extern void fs_mii_disconnect(struct net_device *dev);
908
909static struct net_device *fs_init_instance(struct device *dev,
910 const struct fs_platform_info *fpi)
911{
912 struct net_device *ndev = NULL;
913 struct fs_enet_private *fep = NULL;
914 int privsize, i, r, err = 0, registered = 0;
915
916 /* guard */
917 if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
918 return ERR_PTR(-EINVAL);
919
920 privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
921 (fpi->rx_ring + fpi->tx_ring));
922
923 ndev = alloc_etherdev(privsize);
924 if (!ndev) {
925 err = -ENOMEM;
926 goto err;
927 }
928 SET_MODULE_OWNER(ndev);
929
930 fep = netdev_priv(ndev);
931 memset(fep, 0, privsize); /* clear everything */
932
933 fep->dev = dev;
934 dev_set_drvdata(dev, ndev);
935 fep->fpi = fpi;
936 if (fpi->init_ioports)
937 fpi->init_ioports();
938
939#ifdef CONFIG_FS_ENET_HAS_FEC
940 if (fs_get_fec_index(fpi->fs_no) >= 0)
941 fep->ops = &fs_fec_ops;
942#endif
943
944#ifdef CONFIG_FS_ENET_HAS_SCC
945 if (fs_get_scc_index(fpi->fs_no) >= 0)
946 fep->ops = &fs_scc_ops;
947#endif
948
949#ifdef CONFIG_FS_ENET_HAS_FCC
950 if (fs_get_fcc_index(fpi->fs_no) >= 0)
951 fep->ops = &fs_fcc_ops;
952#endif
953
954 if (fep->ops == NULL) {
955 printk(KERN_ERR DRV_MODULE_NAME
956 ": %s No matching ops found (%d).\n",
957 ndev->name, fpi->fs_no);
958 err = -EINVAL;
959 goto err;
960 }
961
962 r = (*fep->ops->setup_data)(ndev);
963 if (r != 0) {
964 printk(KERN_ERR DRV_MODULE_NAME
965 ": %s setup_data failed\n",
966 ndev->name);
967 err = r;
968 goto err;
969 }
970
971 /* point rx_skbuff, tx_skbuff */
972 fep->rx_skbuff = (struct sk_buff **)&fep[1];
973 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
974
975 /* init locks */
976 spin_lock_init(&fep->lock);
977 spin_lock_init(&fep->tx_lock);
978
979 /*
980 * Set the Ethernet address.
981 */
982 for (i = 0; i < 6; i++)
983 ndev->dev_addr[i] = fpi->macaddr[i];
984
985 r = (*fep->ops->allocate_bd)(ndev);
986
987 if (fep->ring_base == NULL) {
988 printk(KERN_ERR DRV_MODULE_NAME
989 ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
990 err = r;
991 goto err;
992 }
993
994 /*
995 * Set receive and transmit descriptor base.
996 */
997 fep->rx_bd_base = fep->ring_base;
998 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
999
1000 /* initialize ring size variables */
1001 fep->tx_ring = fpi->tx_ring;
1002 fep->rx_ring = fpi->rx_ring;
1003
1004 /*
1005 * The FEC Ethernet specific entries in the device structure.
1006 */
1007 ndev->open = fs_enet_open;
1008 ndev->hard_start_xmit = fs_enet_start_xmit;
1009 ndev->tx_timeout = fs_timeout;
1010 ndev->watchdog_timeo = 2 * HZ;
1011 ndev->stop = fs_enet_close;
1012 ndev->get_stats = fs_enet_get_stats;
1013 ndev->set_multicast_list = fs_set_multicast_list;
1014 if (fpi->use_napi) {
1015 ndev->poll = fs_enet_rx_napi;
1016 ndev->weight = fpi->napi_weight;
1017 }
1018 ndev->ethtool_ops = &fs_ethtool_ops;
1019 ndev->do_ioctl = fs_ioctl;
1020
1021 init_timer(&fep->phy_timer_list);
1022
1023 netif_carrier_off(ndev);
1024
1025 err = register_netdev(ndev);
1026 if (err != 0) {
1027 printk(KERN_ERR DRV_MODULE_NAME
1028 ": %s register_netdev failed.\n", ndev->name);
1029 goto err;
1030 }
1031 registered = 1;
1032
1033 err = fs_mii_connect(ndev);
1034 if (err != 0) {
1035 printk(KERN_ERR DRV_MODULE_NAME
1036 ": %s fs_mii_connect failed.\n", ndev->name);
1037 goto err;
1038 }
1039
1040 return ndev;
1041
1042 err:
1043 if (ndev != NULL) {
1044
1045 if (registered)
1046 unregister_netdev(ndev);
1047
1048 if (fep != NULL) {
1049 (*fep->ops->free_bd)(ndev);
1050 (*fep->ops->cleanup_data)(ndev);
1051 }
1052
1053 free_netdev(ndev);
1054 }
1055
1056 dev_set_drvdata(dev, NULL);
1057
1058 return ERR_PTR(err);
1059}
1060
1061static int fs_cleanup_instance(struct net_device *ndev)
1062{
1063 struct fs_enet_private *fep;
1064 const struct fs_platform_info *fpi;
1065 struct device *dev;
1066
1067 if (ndev == NULL)
1068 return -EINVAL;
1069
1070 fep = netdev_priv(ndev);
1071 if (fep == NULL)
1072 return -EINVAL;
1073
1074 fpi = fep->fpi;
1075
1076 fs_mii_disconnect(ndev);
1077
1078 unregister_netdev(ndev);
1079
1080 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
1081 fep->ring_base, fep->ring_mem_addr);
1082
1083 /* reset it */
1084 (*fep->ops->cleanup_data)(ndev);
1085
1086 dev = fep->dev;
1087 if (dev != NULL) {
1088 dev_set_drvdata(dev, NULL);
1089 fep->dev = NULL;
1090 }
1091
1092 free_netdev(ndev);
1093
1094 return 0;
1095}
1096
1097/**************************************************************************************/
1098
1099/* handy pointer to the immap */
1100void *fs_enet_immap = NULL;
1101
1102static int setup_immap(void)
1103{
1104 phys_addr_t paddr = 0;
1105 unsigned long size = 0;
1106
1107#ifdef CONFIG_CPM1
1108 paddr = IMAP_ADDR;
1109 size = 0x10000; /* map 64K */
1110#endif
1111
1112#ifdef CONFIG_CPM2
1113 paddr = CPM_MAP_ADDR;
1114 size = 0x40000; /* map 256 K */
1115#endif
1116 fs_enet_immap = ioremap(paddr, size);
1117 if (fs_enet_immap == NULL)
1118 return -EBADF; /* XXX ahem; maybe just BUG_ON? */
1119
1120 return 0;
1121}
1122
1123static void cleanup_immap(void)
1124{
1125 if (fs_enet_immap != NULL) {
1126 iounmap(fs_enet_immap);
1127 fs_enet_immap = NULL;
1128 }
1129}
1130
1131/**************************************************************************************/
1132
1133static int __devinit fs_enet_probe(struct device *dev)
1134{
1135 struct net_device *ndev;
1136
1137 /* no fixup - no device */
1138 if (dev->platform_data == NULL) {
1139 printk(KERN_INFO "fs_enet: "
1140 "probe called with no platform data; "
1141 "remove unused devices\n");
1142 return -ENODEV;
1143 }
1144
1145 ndev = fs_init_instance(dev, dev->platform_data);
1146 if (IS_ERR(ndev))
1147 return PTR_ERR(ndev);
1148 return 0;
1149}
1150
1151static int fs_enet_remove(struct device *dev)
1152{
1153 return fs_cleanup_instance(dev_get_drvdata(dev));
1154}
1155
1156static struct device_driver fs_enet_fec_driver = {
1157 .name = "fsl-cpm-fec",
1158 .bus = &platform_bus_type,
1159 .probe = fs_enet_probe,
1160 .remove = fs_enet_remove,
1161#ifdef CONFIG_PM
1162/* .suspend = fs_enet_suspend, TODO */
1163/* .resume = fs_enet_resume, TODO */
1164#endif
1165};
1166
1167static struct device_driver fs_enet_scc_driver = {
1168 .name = "fsl-cpm-scc",
1169 .bus = &platform_bus_type,
1170 .probe = fs_enet_probe,
1171 .remove = fs_enet_remove,
1172#ifdef CONFIG_PM
1173/* .suspend = fs_enet_suspend, TODO */
1174/* .resume = fs_enet_resume, TODO */
1175#endif
1176};
1177
1178static struct device_driver fs_enet_fcc_driver = {
1179 .name = "fsl-cpm-fcc",
1180 .bus = &platform_bus_type,
1181 .probe = fs_enet_probe,
1182 .remove = fs_enet_remove,
1183#ifdef CONFIG_PM
1184/* .suspend = fs_enet_suspend, TODO */
1185/* .resume = fs_enet_resume, TODO */
1186#endif
1187};
1188
1189static int __init fs_init(void)
1190{
1191 int r;
1192
1193 printk(KERN_INFO
1194 "%s", version);
1195
1196 r = setup_immap();
1197 if (r != 0)
1198 return r;
1199 r = driver_register(&fs_enet_fec_driver);
1200 if (r != 0)
1201 goto err;
1202
1203 r = driver_register(&fs_enet_fcc_driver);
1204 if (r != 0)
1205 goto err;
1206
1207 r = driver_register(&fs_enet_scc_driver);
1208 if (r != 0)
1209 goto err;
1210
1211 return 0;
1212err:
1213 cleanup_immap();
1214 return r;
1215
1216}
1217
1218static void __exit fs_cleanup(void)
1219{
1220 driver_unregister(&fs_enet_fec_driver);
1221 driver_unregister(&fs_enet_fcc_driver);
1222 driver_unregister(&fs_enet_scc_driver);
1223 cleanup_immap();
1224}
1225
1226/**************************************************************************************/
1227
1228module_init(fs_init);
1229module_exit(fs_cleanup);
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
new file mode 100644
index 000000000000..c6770377ef87
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-mii.c
@@ -0,0 +1,507 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/string.h>
25#include <linux/ptrace.h>
26#include <linux/errno.h>
27#include <linux/ioport.h>
28#include <linux/slab.h>
29#include <linux/interrupt.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/spinlock.h>
37#include <linux/mii.h>
38#include <linux/ethtool.h>
39#include <linux/bitops.h>
40
41#include <asm/pgtable.h>
42#include <asm/irq.h>
43#include <asm/uaccess.h>
44
45#include "fs_enet.h"
46
47/*************************************************/
48
49/*
50 * Generic PHY support.
51 * Should work for all PHYs, but link change is detected by polling
52 */
53
54static void generic_timer_callback(unsigned long data)
55{
56 struct net_device *dev = (struct net_device *)data;
57 struct fs_enet_private *fep = netdev_priv(dev);
58
59 fep->phy_timer_list.expires = jiffies + HZ / 2;
60
61 add_timer(&fep->phy_timer_list);
62
63 fs_mii_link_status_change_check(dev, 0);
64}
65
66static void generic_startup(struct net_device *dev)
67{
68 struct fs_enet_private *fep = netdev_priv(dev);
69
70 fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
71 fep->phy_timer_list.data = (unsigned long)dev;
72 fep->phy_timer_list.function = generic_timer_callback;
73 add_timer(&fep->phy_timer_list);
74}
75
76static void generic_shutdown(struct net_device *dev)
77{
78 struct fs_enet_private *fep = netdev_priv(dev);
79
80 del_timer_sync(&fep->phy_timer_list);
81}
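/*
 * The rearm in generic_timer_callback() (set .expires, then
 * add_timer()) is equivalent to the single call below; either way the
 * PHY is polled about every 500ms when no link-change interrupt is
 * available:
 *
 *	mod_timer(&fep->phy_timer_list, jiffies + HZ / 2);
 */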
82
83/* ------------------------------------------------------------------------- */
84/* The Davicom DM9161 is used on the NETTA board */
85
86/* register definitions */
87
88#define MII_DM9161_ANAR 4 /* Autoneg Advertisement Register */
89#define MII_DM9161_ACR 16 /* Aux. Config Register */
90#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
91#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
92#define MII_DM9161_INTR 21 /* Interrupt Register */
93#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
94#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
95
96static void dm9161_startup(struct net_device *dev)
97{
98 struct fs_enet_private *fep = netdev_priv(dev);
99
100 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
101 /* Start autonegotiation */
102 fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
103
104 set_current_state(TASK_UNINTERRUPTIBLE);
105 schedule_timeout(HZ * 8);
106}
107
108static void dm9161_ack_int(struct net_device *dev)
109{
110 struct fs_enet_private *fep = netdev_priv(dev);
111
112 fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
113}
114
115static void dm9161_shutdown(struct net_device *dev)
116{
117 struct fs_enet_private *fep = netdev_priv(dev);
118
119 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
120}
121
122/**********************************************************************************/
123
124static const struct phy_info phy_info[] = {
125 {
126 .id = 0x00181b88,
127 .name = "DM9161",
128 .startup = dm9161_startup,
129 .ack_int = dm9161_ack_int,
130 .shutdown = dm9161_shutdown,
131 }, {
132 .id = 0,
133 .name = "GENERIC",
134 .startup = generic_startup,
135 .shutdown = generic_shutdown,
136 },
137};
138
139/**********************************************************************************/
140
141static int phy_id_detect(struct net_device *dev)
142{
143 struct fs_enet_private *fep = netdev_priv(dev);
144 const struct fs_platform_info *fpi = fep->fpi;
145 struct fs_enet_mii_bus *bus = fep->mii_bus;
146 int i, r, start, end, phytype, physubtype;
147 const struct phy_info *phy;
148 int phy_hwid, phy_id;
149
150 phy_hwid = -1;
151 fep->phy = NULL;
152
153 /* auto-detect? */
154 if (fpi->phy_addr == -1) {
155 start = 1;
156 end = 32;
157 } else { /* direct */
158 start = fpi->phy_addr;
159 end = start + 1;
160 }
161
162 for (phy_id = start; phy_id < end; phy_id++) {
163 /* skip already used phy addresses on this bus */
164 if (bus->usage_map & (1 << phy_id))
165 continue;
166 r = fs_mii_read(dev, phy_id, MII_PHYSID1);
167 if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
168 continue;
169 r = fs_mii_read(dev, phy_id, MII_PHYSID2);
170 if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
171 continue;
172 phy_hwid = (phytype << 16) | physubtype;
173 if (phy_hwid != -1)
174 break;
175 }
176
177 if (phy_hwid == -1) {
178 printk(KERN_ERR DRV_MODULE_NAME
179 ": %s No PHY detected! range=0x%02x-0x%02x\n",
180 dev->name, start, end);
181 return -1;
182 }
183
184 for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
185 if (phy->id == (phy_hwid >> 4) || phy->id == 0)
186 break;
187
188 if (i >= ARRAY_SIZE(phy_info)) {
189 printk(KERN_ERR DRV_MODULE_NAME
190 ": %s PHY id 0x%08x is not supported!\n",
191 dev->name, phy_hwid);
192 return -1;
193 }
194
195 fep->phy = phy;
196
197 /* mark this address as used */
198 bus->usage_map |= (1 << phy_id);
199
200 printk(KERN_INFO DRV_MODULE_NAME
201 ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
202 dev->name, phy_id, fep->phy->name, phy_hwid,
203 fpi->phy_addr == -1 ? " (auto-detected)" : "");
204
205 return phy_id;
206}
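/*
 * On the matching arithmetic above: phy_hwid is
 * (PHYSID1 << 16) | PHYSID2, and per the MII spec the low four bits of
 * PHYSID2 carry the silicon revision.  Comparing phy->id against
 * phy_hwid >> 4 therefore lets one phy_info[] entry cover every
 * revision of a part, with the id == 0 GENERIC entry acting as the
 * catch-all since it is ordered last in the table.
 */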
207
208void fs_mii_startup(struct net_device *dev)
209{
210 struct fs_enet_private *fep = netdev_priv(dev);
211
212 if (fep->phy->startup)
213 (*fep->phy->startup) (dev);
214}
215
216void fs_mii_shutdown(struct net_device *dev)
217{
218 struct fs_enet_private *fep = netdev_priv(dev);
219
220 if (fep->phy->shutdown)
221 (*fep->phy->shutdown) (dev);
222}
223
224void fs_mii_ack_int(struct net_device *dev)
225{
226 struct fs_enet_private *fep = netdev_priv(dev);
227
228 if (fep->phy->ack_int)
229 (*fep->phy->ack_int) (dev);
230}
231
232#define MII_LINK 0x0001
233#define MII_HALF 0x0002
234#define MII_FULL 0x0004
235#define MII_BASE4 0x0008
236#define MII_10M 0x0010
237#define MII_100M 0x0020
238#define MII_1G 0x0040
239#define MII_10G 0x0080
240
241/* return full mii info at one gulp, with a usable form */
242static unsigned int mii_full_status(struct mii_if_info *mii)
243{
244 unsigned int status;
245 int bmsr, adv, lpa, neg;
246 struct fs_enet_private *fep = netdev_priv(mii->dev);
247
248 /* first, a dummy read, needed to latch some MII phys */
249 (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
250 bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
251
252 /* no link */
253 if ((bmsr & BMSR_LSTATUS) == 0)
254 return 0;
255
256 status = MII_LINK;
257
258 /* Let's look at what ANEG says if it's supported - otherwise we shall
259 take the right values from the platform info */
260 if (!mii->force_media) {
261 /* autoneg not completed; don't bother */
262 if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
263 return 0;
264
265 adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
266 lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
267
268 neg = lpa & adv;
269 } else {
270 neg = fep->fpi->bus_info->lpa;
271 }
272
273 if (neg & LPA_100FULL)
274 status |= MII_FULL | MII_100M;
275 else if (neg & LPA_100BASE4)
276 status |= MII_FULL | MII_BASE4 | MII_100M;
277 else if (neg & LPA_100HALF)
278 status |= MII_HALF | MII_100M;
279 else if (neg & LPA_10FULL)
280 status |= MII_FULL | MII_10M;
281 else
282 status |= MII_HALF | MII_10M;
283
284 return status;
285}
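/*
 * Worked example of the resolution above (the helper is illustrative
 * and not part of the driver): advertising 10/100 at both duplexes
 * against a partner that only offers 100HALF leaves LPA_100HALF as
 * the highest common ability.
 */
static inline unsigned int mii_resolve_example(void)
{
	int adv = ADVERTISE_ALL;	/* 10/100, half + full */
	int lpa = LPA_100HALF;		/* partner: 100HALF only */
	int neg = lpa & adv;		/* common abilities */

	/* same cascade as mii_full_status() */
	if (neg & LPA_100FULL)
		return MII_LINK | MII_FULL | MII_100M;
	if (neg & LPA_100HALF)
		return MII_LINK | MII_HALF | MII_100M;
	return MII_LINK | MII_HALF | MII_10M;
}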
286
287void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
288{
289 struct fs_enet_private *fep = netdev_priv(dev);
290 struct mii_if_info *mii = &fep->mii_if;
291 unsigned int mii_status;
292 int ok_to_print, link, duplex, speed;
293 unsigned long flags;
294
295 ok_to_print = netif_msg_link(fep);
296
297 mii_status = mii_full_status(mii);
298
299 if (!init_media && mii_status == fep->last_mii_status)
300 return;
301
302 fep->last_mii_status = mii_status;
303
304 link = !!(mii_status & MII_LINK);
305 duplex = !!(mii_status & MII_FULL);
306 speed = (mii_status & MII_100M) ? 100 : 10;
307
308 if (link == 0) {
309 netif_carrier_off(mii->dev);
310 netif_stop_queue(dev);
311 if (!init_media) {
312 spin_lock_irqsave(&fep->lock, flags);
313 (*fep->ops->stop)(dev);
314 spin_unlock_irqrestore(&fep->lock, flags);
315 }
316
317 if (ok_to_print)
318 printk(KERN_INFO "%s: link down\n", mii->dev->name);
319
320 } else {
321
322 mii->full_duplex = duplex;
323
324 netif_carrier_on(mii->dev);
325
326 spin_lock_irqsave(&fep->lock, flags);
327 fep->duplex = duplex;
328 fep->speed = speed;
329 (*fep->ops->restart)(dev);
330 spin_unlock_irqrestore(&fep->lock, flags);
331
332 netif_start_queue(dev);
333
334 if (ok_to_print)
335 printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
336 dev->name, speed, duplex ? "full" : "half");
337 }
338}
339
340/**********************************************************************************/
341
342int fs_mii_read(struct net_device *dev, int phy_id, int location)
343{
344 struct fs_enet_private *fep = netdev_priv(dev);
345 struct fs_enet_mii_bus *bus = fep->mii_bus;
346
347 unsigned long flags;
348 int ret;
349
350 spin_lock_irqsave(&bus->mii_lock, flags);
351 ret = (*bus->mii_read)(bus, phy_id, location);
352 spin_unlock_irqrestore(&bus->mii_lock, flags);
353
354 return ret;
355}
356
357void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
358{
359 struct fs_enet_private *fep = netdev_priv(dev);
360 struct fs_enet_mii_bus *bus = fep->mii_bus;
361 unsigned long flags;
362
363 spin_lock_irqsave(&bus->mii_lock, flags);
364 (*bus->mii_write)(bus, phy_id, location, value);
365 spin_unlock_irqrestore(&bus->mii_lock, flags);
366}
367
368/*****************************************************************************/
369
370/* list of all registered mii buses */
371static LIST_HEAD(fs_mii_bus_list);
372
373static struct fs_enet_mii_bus *lookup_bus(int method, int id)
374{
375 struct list_head *ptr;
376 struct fs_enet_mii_bus *bus;
377
378 list_for_each(ptr, &fs_mii_bus_list) {
379 bus = list_entry(ptr, struct fs_enet_mii_bus, list);
380 if (bus->bus_info->method == method &&
381 bus->bus_info->id == id)
382 return bus;
383 }
384 return NULL;
385}
386
387static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
388{
389 struct fs_enet_mii_bus *bus;
390 int ret = 0;
391
392 bus = kmalloc(sizeof(*bus), GFP_KERNEL);
393 if (bus == NULL) {
394 ret = -ENOMEM;
395 goto err;
396 }
397 memset(bus, 0, sizeof(*bus));
398 spin_lock_init(&bus->mii_lock);
399 bus->bus_info = bi;
400 bus->refs = 0;
401 bus->usage_map = 0;
402
403 /* perform initialization */
404 switch (bi->method) {
405
406 case fsmii_fixed:
407 ret = fs_mii_fixed_init(bus);
408 if (ret != 0)
409 goto err;
410 break;
411
412 case fsmii_bitbang:
413 ret = fs_mii_bitbang_init(bus);
414 if (ret != 0)
415 goto err;
416 break;
417#ifdef CONFIG_FS_ENET_HAS_FEC
418 case fsmii_fec:
419 ret = fs_mii_fec_init(bus);
420 if (ret != 0)
421 goto err;
422 break;
423#endif
424 default:
425 ret = -EINVAL;
426 goto err;
427 }
428
429 list_add(&bus->list, &fs_mii_bus_list);
430
431 return bus;
432
433err:
434 /* kfree(NULL) is a no-op */
435 kfree(bus);
436 return ERR_PTR(ret);
437}
438
439static void destroy_bus(struct fs_enet_mii_bus *bus)
440{
441 /* remove from bus list */
442 list_del(&bus->list);
443
444 /* nothing more needed */
445 kfree(bus);
446}
447
448int fs_mii_connect(struct net_device *dev)
449{
450 struct fs_enet_private *fep = netdev_priv(dev);
451 const struct fs_platform_info *fpi = fep->fpi;
452 struct fs_enet_mii_bus *bus = NULL;
453
454 /* check method validity */
455 switch (fpi->bus_info->method) {
456 case fsmii_fixed:
457 case fsmii_bitbang:
458 break;
459#ifdef CONFIG_FS_ENET_HAS_FEC
460 case fsmii_fec:
461 break;
462#endif
463 default:
464 printk(KERN_ERR DRV_MODULE_NAME
465 ": %s Unknown MII bus method (%d)!\n",
466 dev->name, fpi->bus_info->method);
467 return -EINVAL;
468 }
469
470 bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
471
472 /* if not found create new bus */
473 if (bus == NULL) {
474 bus = create_bus(fpi->bus_info);
475 if (IS_ERR(bus)) {
476 printk(KERN_ERR DRV_MODULE_NAME
477 ": %s MII bus creation failure!\n", dev->name);
478 return PTR_ERR(bus);
479 }
480 }
481
482 bus->refs++;
483
484 fep->mii_bus = bus;
485
486 fep->mii_if.dev = dev;
487 fep->mii_if.phy_id_mask = 0x1f;
488 fep->mii_if.reg_num_mask = 0x1f;
489 fep->mii_if.mdio_read = fs_mii_read;
490 fep->mii_if.mdio_write = fs_mii_write;
491 fep->mii_if.force_media = fpi->bus_info->disable_aneg;
492 fep->mii_if.phy_id = phy_id_detect(dev);
493
494 return 0;
495}
496
497void fs_mii_disconnect(struct net_device *dev)
498{
499 struct fs_enet_private *fep = netdev_priv(dev);
500 struct fs_enet_mii_bus *bus = NULL;
501
502 bus = fep->mii_bus;
503 fep->mii_bus = NULL;
504
505 if (--bus->refs <= 0)
506 destroy_bus(bus);
507}
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
new file mode 100644
index 000000000000..1105543b9d88
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -0,0 +1,245 @@
1#ifndef FS_ENET_H
2#define FS_ENET_H
3
4#include <linux/mii.h>
5#include <linux/netdevice.h>
6#include <linux/types.h>
7#include <linux/version.h>
8#include <linux/list.h>
9
10#include <linux/fs_enet_pd.h>
11
12#include <asm/dma-mapping.h>
13
14#ifdef CONFIG_CPM1
15#include <asm/commproc.h>
16#endif
17
18#ifdef CONFIG_CPM2
19#include <asm/cpm2.h>
20#endif
21
22/* hw driver ops */
23struct fs_ops {
24 int (*setup_data)(struct net_device *dev);
25 int (*allocate_bd)(struct net_device *dev);
26 void (*free_bd)(struct net_device *dev);
27 void (*cleanup_data)(struct net_device *dev);
28 void (*set_multicast_list)(struct net_device *dev);
29 void (*restart)(struct net_device *dev);
30 void (*stop)(struct net_device *dev);
31 void (*pre_request_irq)(struct net_device *dev, int irq);
32 void (*post_free_irq)(struct net_device *dev, int irq);
33 void (*napi_clear_rx_event)(struct net_device *dev);
34 void (*napi_enable_rx)(struct net_device *dev);
35 void (*napi_disable_rx)(struct net_device *dev);
36 void (*rx_bd_done)(struct net_device *dev);
37 void (*tx_kickstart)(struct net_device *dev);
38 u32 (*get_int_events)(struct net_device *dev);
39 void (*clear_int_events)(struct net_device *dev, u32 int_events);
40 void (*ev_error)(struct net_device *dev, u32 int_events);
41 int (*get_regs)(struct net_device *dev, void *p, int *sizep);
42 int (*get_regs_len)(struct net_device *dev);
43 void (*tx_restart)(struct net_device *dev);
44};
45
46struct phy_info {
47 unsigned int id;
48 const char *name;
49 void (*startup) (struct net_device * dev);
50 void (*shutdown) (struct net_device * dev);
51 void (*ack_int) (struct net_device * dev);
52};
53
54/* The FEC stores dest/src/type, data, and checksum for receive packets.
55 */
56#define MAX_MTU 1508 /* Allow full-sized PPPoE packets over VLAN */
57#define MIN_MTU 46 /* this is data size */
58#define CRC_LEN 4
59
60#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
61#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
62
63/* Must be a multiple of 32 (to cover both FEC & FCC) */
64#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
65/* This is needed so that invalidate_xxx won't invalidate too much */
66#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE)
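/*
 * Worked numbers for the defaults above: PKT_MAXBUF_SIZE is
 * 1508 + 14 + 4 = 1526, so PKT_MAXBLR_SIZE = (1526 + 31) & ~31 = 1536,
 * the next multiple of 32.  The idiom generalizes to any power-of-two
 * alignment:
 *
 *	(x + align - 1) & ~(align - 1)
 */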
67
68struct fs_enet_mii_bus {
69 struct list_head list;
70 spinlock_t mii_lock;
71 const struct fs_mii_bus_info *bus_info;
72 int refs;
73 u32 usage_map;
74
75 int (*mii_read)(struct fs_enet_mii_bus *bus,
76 int phy_id, int location);
77
78 void (*mii_write)(struct fs_enet_mii_bus *bus,
79 int phy_id, int location, int value);
80
81 union {
82 struct {
83 unsigned int mii_speed;
84 void *fecp;
85 } fec;
86
87 struct {
88 /* note that the actual port size may */
89 /* be different; cpm(s) handle it OK */
90 u8 mdio_msk;
91 u8 *mdio_dir;
92 u8 *mdio_dat;
93 u8 mdc_msk;
94 u8 *mdc_dir;
95 u8 *mdc_dat;
96 } bitbang;
97
98 struct {
99 u16 lpa;
100 } fixed;
101 };
102};
103
104int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
105int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
106int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
107
108struct fs_enet_private {
109 struct device *dev; /* pointer back to the device (must be initialized first) */
110 spinlock_t lock; /* during all ops except TX packet processing */
111 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
112 const struct fs_platform_info *fpi;
113 const struct fs_ops *ops;
114 int rx_ring, tx_ring;
115 dma_addr_t ring_mem_addr;
116 void *ring_base;
117 struct sk_buff **rx_skbuff;
118 struct sk_buff **tx_skbuff;
119 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
120 cbd_t *tx_bd_base;
121 cbd_t *dirty_tx; /* ring entries to be free()ed. */
122 cbd_t *cur_rx;
123 cbd_t *cur_tx;
124 int tx_free;
125 struct net_device_stats stats;
126 struct timer_list phy_timer_list;
127 const struct phy_info *phy;
128 u32 msg_enable;
129 struct mii_if_info mii_if;
130 unsigned int last_mii_status;
131 struct fs_enet_mii_bus *mii_bus;
132 int interrupt;
133
134 int duplex, speed; /* current settings */
135
136 /* event masks */
137 u32 ev_napi_rx; /* mask of NAPI rx events */
138 u32 ev_rx; /* rx event mask */
139 u32 ev_tx; /* tx event mask */
140 u32 ev_err; /* error event mask */
141
142 u16 bd_rx_empty; /* mask of BD rx empty */
143 u16 bd_rx_err; /* mask of BD rx errors */
144
145 union {
146 struct {
147 int idx; /* FEC1 = 0, FEC2 = 1 */
148 void *fecp; /* hw registers */
149 u32 hthi, htlo; /* state for multicast */
150 } fec;
151
152 struct {
153 int idx; /* FCC1-3 = 0-2 */
154 void *fccp; /* hw registers */
155 void *ep; /* parameter ram */
156 void *fcccp; /* hw registers cont. */
157 void *mem; /* FCC DPRAM */
158 u32 gaddrh, gaddrl; /* group address */
159 } fcc;
160
161 struct {
162 int idx; /* SCC1 = 0, SCC2 = 1 */
163 void *sccp; /* hw registers */
164 void *ep; /* parameter ram */
165 u32 hthi, htlo; /* state for multicast */
166 } scc;
167
168 };
169};
170
171/***************************************************************************/
172
173int fs_mii_read(struct net_device *dev, int phy_id, int location);
174void fs_mii_write(struct net_device *dev, int phy_id, int location, int value);
175
176void fs_mii_startup(struct net_device *dev);
177void fs_mii_shutdown(struct net_device *dev);
178void fs_mii_ack_int(struct net_device *dev);
179
180void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
181
182void fs_init_bds(struct net_device *dev);
183void fs_cleanup_bds(struct net_device *dev);
184
185/***************************************************************************/
186
187#define DRV_MODULE_NAME "fs_enet"
188#define PFX DRV_MODULE_NAME ": "
189#define DRV_MODULE_VERSION "1.0"
190#define DRV_MODULE_RELDATE "Aug 8, 2005"
191
192/***************************************************************************/
193
194int fs_enet_platform_init(void);
195void fs_enet_platform_cleanup(void);
196
197/***************************************************************************/
198
199/* buffer descriptor access macros */
200
201/* access macros */
202#if defined(CONFIG_CPM1)
203/* for a CPM1 __raw_xxx's are sufficient */
204#define __cbd_out32(addr, x) __raw_writel(x, addr)
205#define __cbd_out16(addr, x) __raw_writew(x, addr)
206#define __cbd_in32(addr) __raw_readl(addr)
207#define __cbd_in16(addr) __raw_readw(addr)
208#else
209/* for others play it safe */
210#define __cbd_out32(addr, x) out_be32(addr, x)
211#define __cbd_out16(addr, x) out_be16(addr, x)
212#define __cbd_in32(addr) in_be32(addr)
213#define __cbd_in16(addr) in_be16(addr)
214#endif
215
216/* write */
217#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
218#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
219#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
220
221/* read */
222#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
223#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
224#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
225
226/* set bits */
227#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
228
229/* clear bits */
230#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
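/*
 * Usage sketch for the accessors above -- e.g. handing a receive
 * descriptor back to the controller after its buffer has been
 * harvested, preserving the wrap bit (this mirrors what the rx path
 * in fs_enet-main.c does):
 *
 *	CBDW_DATLEN(bdp, 0);
 *	CBDS_SC(bdp, BD_ENET_RX_EMPTY);
 */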
231
232/*******************************************************************/
233
234extern const struct fs_ops fs_fec_ops;
235extern const struct fs_ops fs_fcc_ops;
236extern const struct fs_ops fs_scc_ops;
237
238/*******************************************************************/
239
240/* handy pointer to the immap */
241extern void *fs_enet_immap;
242
243/*******************************************************************/
244
245#endif
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
new file mode 100644
index 000000000000..a940b96433c7
--- /dev/null
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -0,0 +1,578 @@
1/*
2 * FCC driver for Motorola MPC82xx (PQ2).
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/immap_cpm2.h>
39#include <asm/mpc8260.h>
40#include <asm/cpm2.h>
41
42#include <asm/pgtable.h>
43#include <asm/irq.h>
44#include <asm/uaccess.h>
45
46#include "fs_enet.h"
47
48/*************************************************/
49
50/* FCC access macros */
51
52#define __fcc_out32(addr, x) out_be32((unsigned *)addr, x)
53#define __fcc_out16(addr, x) out_be16((unsigned short *)addr, x)
54#define __fcc_out8(addr, x) out_8((unsigned char *)addr, x)
55#define __fcc_in32(addr) in_be32((unsigned *)addr)
56#define __fcc_in16(addr) in_be16((unsigned short *)addr)
57#define __fcc_in8(addr) in_8((unsigned char *)addr)
58
59/* parameter space */
60
61/* write, read, set bits, clear bits */
62#define W32(_p, _m, _v) __fcc_out32(&(_p)->_m, (_v))
63#define R32(_p, _m) __fcc_in32(&(_p)->_m)
64#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
65#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
66
67#define W16(_p, _m, _v) __fcc_out16(&(_p)->_m, (_v))
68#define R16(_p, _m) __fcc_in16(&(_p)->_m)
69#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
70#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
71
72#define W8(_p, _m, _v) __fcc_out8(&(_p)->_m, (_v))
73#define R8(_p, _m) __fcc_in8(&(_p)->_m)
74#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
75#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
76
77/*************************************************/
78
79#define FCC_MAX_MULTICAST_ADDRS 64
80
81#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
82#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
83#define mk_mii_end 0
84
85#define MAX_CR_CMD_LOOPS 10000
86
87static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 mcn, u32 op)
88{
89 const struct fs_platform_info *fpi = fep->fpi;
90
91 cpm2_map_t *immap = fs_enet_immap;
92 cpm_cpm2_t *cpmp = &immap->im_cpm;
93 u32 v;
94 int i;
95
96 /* Currently I don't know what the future call will look like, but
97 I guess there'd be something like do_cpm_cmd() which will require page & sblock */
98 v = mk_cr_cmd(fpi->cp_page, fpi->cp_block, mcn, op);
99 W32(cpmp, cp_cpcr, v | CPM_CR_FLG);
100 for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
101 if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
102 break;
103
104 if (i >= MAX_CR_CMD_LOOPS) {
105 printk(KERN_ERR "%s(): Not able to issue CPM command\n",
106 __FUNCTION__);
107 return 1;
108 }
109
110 return 0;
111}
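/*
 * Call sites below (restart(), tx_restart(), set_multicast_one())
 * currently ignore the return value; a checked wrapper would look like
 * this sketch (the helper name is illustrative only):
 */
static inline void fcc_cr_cmd_checked(struct fs_enet_private *fep,
				      u32 mcn, u32 op)
{
	if (fcc_cr_cmd(fep, mcn, op) != 0)
		printk(KERN_ERR DRV_MODULE_NAME
		       ": CPM command 0x%x did not complete\n", op);
}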
112
113static int do_pd_setup(struct fs_enet_private *fep)
114{
115 struct platform_device *pdev = to_platform_device(fep->dev);
116 struct resource *r;
117
118 /* Fill out IRQ field */
119 fep->interrupt = platform_get_irq(pdev, 0);
120
121 /* Attach the memory for the FCC Parameter RAM */
122 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
123 fep->fcc.ep = (void *)r->start;
124
125 if (fep->fcc.ep == NULL)
126 return -EINVAL;
127
128 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
129 fep->fcc.fccp = (void *)r->start;
130
131 if (fep->fcc.fccp == NULL)
132 return -EINVAL;
133
134 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
135
136 if (fep->fcc.fcccp == NULL)
137 return -EINVAL;
138
139 return 0;
140}
141
142#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
143#define FCC_RX_EVENT (FCC_ENET_RXF)
144#define FCC_TX_EVENT (FCC_ENET_TXB)
145#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY)
146
147static int setup_data(struct net_device *dev)
148{
149 struct fs_enet_private *fep = netdev_priv(dev);
150 const struct fs_platform_info *fpi = fep->fpi;
151
152 fep->fcc.idx = fs_get_fcc_index(fpi->fs_no);
153 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
154 return -EINVAL;
155
156 fep->fcc.mem = (void *)fpi->mem_offset;
157
158 if (do_pd_setup(fep) != 0)
159 return -EINVAL;
160
161 fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
162 fep->ev_rx = FCC_RX_EVENT;
163 fep->ev_tx = FCC_TX_EVENT;
164 fep->ev_err = FCC_ERR_EVENT_MSK;
165
166 return 0;
167}
168
169static int allocate_bd(struct net_device *dev)
170{
171 struct fs_enet_private *fep = netdev_priv(dev);
172 const struct fs_platform_info *fpi = fep->fpi;
173
174 fep->ring_base = dma_alloc_coherent(fep->dev,
175 (fpi->tx_ring + fpi->rx_ring) *
176 sizeof(cbd_t), &fep->ring_mem_addr,
177 GFP_KERNEL);
178 if (fep->ring_base == NULL)
179 return -ENOMEM;
180
181 return 0;
182}
183
184static void free_bd(struct net_device *dev)
185{
186 struct fs_enet_private *fep = netdev_priv(dev);
187 const struct fs_platform_info *fpi = fep->fpi;
188
189 if (fep->ring_base)
190 dma_free_coherent(fep->dev,
191 (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
192 fep->ring_base, fep->ring_mem_addr);
193}
194
195static void cleanup_data(struct net_device *dev)
196{
197 /* nothing */
198}
199
200static void set_promiscuous_mode(struct net_device *dev)
201{
202 struct fs_enet_private *fep = netdev_priv(dev);
203 fcc_t *fccp = fep->fcc.fccp;
204
205 S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
206}
207
208static void set_multicast_start(struct net_device *dev)
209{
210 struct fs_enet_private *fep = netdev_priv(dev);
211 fcc_enet_t *ep = fep->fcc.ep;
212
213 W32(ep, fen_gaddrh, 0);
214 W32(ep, fen_gaddrl, 0);
215}
216
217static void set_multicast_one(struct net_device *dev, const u8 *mac)
218{
219 struct fs_enet_private *fep = netdev_priv(dev);
220 fcc_enet_t *ep = fep->fcc.ep;
221 u16 taddrh, taddrm, taddrl;
222
223 taddrh = ((u16)mac[5] << 8) | mac[4];
224 taddrm = ((u16)mac[3] << 8) | mac[2];
225 taddrl = ((u16)mac[1] << 8) | mac[0];
226
227 W16(ep, fen_taddrh, taddrh);
228 W16(ep, fen_taddrm, taddrm);
229 W16(ep, fen_taddrl, taddrl);
230 fcc_cr_cmd(fep, 0x0C, CPM_CR_SET_GADDR);
231}
232
233static void set_multicast_finish(struct net_device *dev)
234{
235 struct fs_enet_private *fep = netdev_priv(dev);
236 fcc_t *fccp = fep->fcc.fccp;
237 fcc_enet_t *ep = fep->fcc.ep;
238
239 /* clear promiscuous always */
240 C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
241
242 /* if all multi or too many multicasts; just enable all */
243 if ((dev->flags & IFF_ALLMULTI) != 0 ||
244 dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
245
246 W32(ep, fen_gaddrh, 0xffffffff);
247 W32(ep, fen_gaddrl, 0xffffffff);
248 }
249
250 /* read back */
251 fep->fcc.gaddrh = R32(ep, fen_gaddrh);
252 fep->fcc.gaddrl = R32(ep, fen_gaddrl);
253}
254
255static void set_multicast_list(struct net_device *dev)
256{
257 struct dev_mc_list *pmc;
258
259 if ((dev->flags & IFF_PROMISC) == 0) {
260 set_multicast_start(dev);
261 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
262 set_multicast_one(dev, pmc->dmi_addr);
263 set_multicast_finish(dev);
264 } else
265 set_promiscuous_mode(dev);
266}
267
268static void restart(struct net_device *dev)
269{
270 struct fs_enet_private *fep = netdev_priv(dev);
271 const struct fs_platform_info *fpi = fep->fpi;
272 fcc_t *fccp = fep->fcc.fccp;
273 fcc_c_t *fcccp = fep->fcc.fcccp;
274 fcc_enet_t *ep = fep->fcc.ep;
275 dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
276 u16 paddrh, paddrm, paddrl;
277 u16 mem_addr;
278 const unsigned char *mac;
279 int i;
280
281 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
282
283 /* clear everything (slow & steady does it) */
284 for (i = 0; i < sizeof(*ep); i++)
285 __fcc_out8((char *)ep + i, 0);
286
287 /* get physical address */
288 rx_bd_base_phys = fep->ring_mem_addr;
289 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
290
291 /* point to bds */
292 W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
293 W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
294
295 /* Set maximum bytes per receive buffer.
296 * It must be a multiple of 32.
297 */
298 W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
299
300 W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
301 W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
302
303 /* Allocate space in the reserved FCC area of DPRAM for the
304 * internal buffers. No one uses this space (yet), so we
305 * can do this. Later, we will add resource management for
306 * this area.
307 */
308
309 mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */
310
311 W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff));
312 W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff));
313 W16(ep, fen_padptr, mem_addr + 64);
314
315 /* fill with special symbol... */
316 memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
317
318 W32(ep, fen_genfcc.fcc_rbptr, 0);
319 W32(ep, fen_genfcc.fcc_tbptr, 0);
320 W32(ep, fen_genfcc.fcc_rcrc, 0);
321 W32(ep, fen_genfcc.fcc_tcrc, 0);
322 W16(ep, fen_genfcc.fcc_res1, 0);
323 W32(ep, fen_genfcc.fcc_res2, 0);
324
325 /* no CAM */
326 W32(ep, fen_camptr, 0);
327
328 /* Set CRC preset and mask */
329 W32(ep, fen_cmask, 0xdebb20e3);
330 W32(ep, fen_cpres, 0xffffffff);
331
332 W32(ep, fen_crcec, 0); /* CRC Error counter */
333 W32(ep, fen_alec, 0); /* alignment error counter */
334 W32(ep, fen_disfc, 0); /* discard frame counter */
335 W16(ep, fen_retlim, 15); /* Retry limit threshold */
336 W16(ep, fen_pper, 0); /* Normal persistence */
337
338 /* set group address */
339 W32(ep, fen_gaddrh, fep->fcc.gaddrh);
340 W32(ep, fen_gaddrl, fep->fcc.gaddrl);
341
342 /* Clear hash filter tables */
343 W32(ep, fen_iaddrh, 0);
344 W32(ep, fen_iaddrl, 0);
345
346 /* Clear the Out-of-sequence TxBD */
347 W16(ep, fen_tfcstat, 0);
348 W16(ep, fen_tfclen, 0);
349 W32(ep, fen_tfcptr, 0);
350
351 W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
352 W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
353
354 /* set address */
355 mac = dev->dev_addr;
356 paddrh = ((u16)mac[5] << 8) | mac[4];
357 paddrm = ((u16)mac[3] << 8) | mac[2];
358 paddrl = ((u16)mac[1] << 8) | mac[0];
359
360 W16(ep, fen_paddrh, paddrh);
361 W16(ep, fen_paddrm, paddrm);
362 W16(ep, fen_paddrl, paddrl);
363
364 W16(ep, fen_taddrh, 0);
365 W16(ep, fen_taddrm, 0);
366 W16(ep, fen_taddrl, 0);
367
368 W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
369 W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
370
371 /* Clear stat counters, in case we ever enable RMON */
372 W32(ep, fen_octc, 0);
373 W32(ep, fen_colc, 0);
374 W32(ep, fen_broc, 0);
375 W32(ep, fen_mulc, 0);
376 W32(ep, fen_uspc, 0);
377 W32(ep, fen_frgc, 0);
378 W32(ep, fen_ospc, 0);
379 W32(ep, fen_jbrc, 0);
380 W32(ep, fen_p64c, 0);
381 W32(ep, fen_p65c, 0);
382 W32(ep, fen_p128c, 0);
383 W32(ep, fen_p256c, 0);
384 W32(ep, fen_p512c, 0);
385 W32(ep, fen_p1024c, 0);
386
387 W16(ep, fen_rfthr, 0); /* Suggested by manual */
388 W16(ep, fen_rfcnt, 0);
389 W16(ep, fen_cftype, 0);
390
391 fs_init_bds(dev);
392
393 /* adjust to speed (for RMII mode) */
394 if (fpi->use_rmii) {
395 if (fep->speed == 100)
396 C8(fcccp, fcc_gfemr, 0x20);
397 else
398 S8(fcccp, fcc_gfemr, 0x20);
399 }
400
401 fcc_cr_cmd(fep, 0x0c, CPM_CR_INIT_TRX);
402
403 /* clear events */
404 W16(fccp, fcc_fcce, 0xffff);
405
406 /* Enable interrupts we wish to service */
407 W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
408
409 /* Set GFMR to enable Ethernet operating mode */
410 W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
411
412 /* set sync/delimiters */
413 W16(fccp, fcc_fdsr, 0xd555);
414
415 W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
416
417 if (fpi->use_rmii)
418 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
419
420 /* adjust to duplex mode */
421 if (fep->duplex)
422 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
423 else
424 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
425
426 S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
427}
428
429static void stop(struct net_device *dev)
430{
431 struct fs_enet_private *fep = netdev_priv(dev);
432 fcc_t *fccp = fep->fcc.fccp;
433
434 /* stop ethernet */
435 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
436
437 /* clear events */
438 W16(fccp, fcc_fcce, 0xffff);
439
440 /* clear interrupt mask */
441 W16(fccp, fcc_fccm, 0);
442
443 fs_cleanup_bds(dev);
444}
445
446static void pre_request_irq(struct net_device *dev, int irq)
447{
448 /* nothing */
449}
450
451static void post_free_irq(struct net_device *dev, int irq)
452{
453 /* nothing */
454}
455
456static void napi_clear_rx_event(struct net_device *dev)
457{
458 struct fs_enet_private *fep = netdev_priv(dev);
459 fcc_t *fccp = fep->fcc.fccp;
460
461 W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
462}
463
464static void napi_enable_rx(struct net_device *dev)
465{
466 struct fs_enet_private *fep = netdev_priv(dev);
467 fcc_t *fccp = fep->fcc.fccp;
468
469 S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
470}
471
472static void napi_disable_rx(struct net_device *dev)
473{
474 struct fs_enet_private *fep = netdev_priv(dev);
475 fcc_t *fccp = fep->fcc.fccp;
476
477 C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
478}
479
480static void rx_bd_done(struct net_device *dev)
481{
482 /* nothing */
483}
484
485static void tx_kickstart(struct net_device *dev)
486{
487 /* nothing */
488}
489
490static u32 get_int_events(struct net_device *dev)
491{
492 struct fs_enet_private *fep = netdev_priv(dev);
493 fcc_t *fccp = fep->fcc.fccp;
494
495 return (u32)R16(fccp, fcc_fcce);
496}
497
498static void clear_int_events(struct net_device *dev, u32 int_events)
499{
500 struct fs_enet_private *fep = netdev_priv(dev);
501 fcc_t *fccp = fep->fcc.fccp;
502
503 W16(fccp, fcc_fcce, int_events & 0xffff);
504}
505
506static void ev_error(struct net_device *dev, u32 int_events)
507{
508 printk(KERN_WARNING DRV_MODULE_NAME
509 ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events);
510}
511
512static int get_regs(struct net_device *dev, void *p, int *sizep)
513{
514 struct fs_enet_private *fep = netdev_priv(dev);
515
516 if (*sizep < sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t))
517 return -EINVAL;
518
519 memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
520 p = (char *)p + sizeof(fcc_t);
521
522 memcpy_fromio(p, fep->fcc.fcccp, sizeof(fcc_c_t));
523 p = (char *)p + sizeof(fcc_c_t);
524
525 memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
526
527 return 0;
528}
529
530static int get_regs_len(struct net_device *dev)
531{
532 return sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t);
533}
534
535/* Some transmit errors cause the transmitter to shut
536 * down. We now issue a restart transmit. Since the
537 * errors close the BD and update the pointers, the restart
538 * _should_ pick up without having to reset any of our
539 * pointers either. Also, to work around 8260 device erratum
540 * CPM37, we must disable and then re-enable the transmitter
541 * following a Late Collision, Underrun, or Retry Limit error.
542 */
543static void tx_restart(struct net_device *dev)
544{
545 struct fs_enet_private *fep = netdev_priv(dev);
546 fcc_t *fccp = fep->fcc.fccp;
547
548 C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
549 udelay(10);
550 S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
551
552 fcc_cr_cmd(fep, 0x0C, CPM_CR_RESTART_TX);
553}
554
555/*************************************************************************/
556
557const struct fs_ops fs_fcc_ops = {
558 .setup_data = setup_data,
559 .cleanup_data = cleanup_data,
560 .set_multicast_list = set_multicast_list,
561 .restart = restart,
562 .stop = stop,
563 .pre_request_irq = pre_request_irq,
564 .post_free_irq = post_free_irq,
565 .napi_clear_rx_event = napi_clear_rx_event,
566 .napi_enable_rx = napi_enable_rx,
567 .napi_disable_rx = napi_disable_rx,
568 .rx_bd_done = rx_bd_done,
569 .tx_kickstart = tx_kickstart,
570 .get_int_events = get_int_events,
571 .clear_int_events = clear_int_events,
572 .ev_error = ev_error,
573 .get_regs = get_regs,
574 .get_regs_len = get_regs_len,
575 .tx_restart = tx_restart,
576 .allocate_bd = allocate_bd,
577 .free_bd = free_bd,
578};
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
new file mode 100644
index 000000000000..5ef4e845a387
--- /dev/null
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -0,0 +1,653 @@
1/*
2 * Freescale Ethernet controllers
3 *
4 * Copyright (c) 2005 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#ifdef CONFIG_8xx
42#include <asm/8xx_immap.h>
43#include <asm/pgtable.h>
44#include <asm/mpc8xx.h>
45#include <asm/commproc.h>
46#endif
47
48#include "fs_enet.h"
49
50/*************************************************/
51
52#if defined(CONFIG_CPM1)
53/* for a CPM1 __raw_xxx's are sufficient */
54#define __fs_out32(addr, x) __raw_writel(x, addr)
55#define __fs_out16(addr, x) __raw_writew(x, addr)
56#define __fs_in32(addr) __raw_readl(addr)
57#define __fs_in16(addr) __raw_readw(addr)
58#else
59/* for others play it safe */
60#define __fs_out32(addr, x) out_be32(addr, x)
61#define __fs_out16(addr, x) out_be16(addr, x)
62#define __fs_in32(addr) in_be32(addr)
63#define __fs_in16(addr) in_be16(addr)
64#endif
65
66/* write */
67#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
68
69/* read */
70#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
71
72/* set bits */
73#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
74
75/* clear bits */
76#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
77
78
79/* CRC polynomial used by the FEC for multicast group filtering */
80#define FEC_CRC_POLY 0x04C11DB7
81
82#define FEC_MAX_MULTICAST_ADDRS 64
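/*
 * The group filter kept in fep->fec.hthi/htlo is a 64-bit hash: each
 * multicast address is CRC'd with FEC_CRC_POLY and a 6-bit slice of
 * the result selects one bit.  A sketch of the common shape using
 * ether_crc() from <linux/crc32.h> -- the open-coded CRC in
 * set_multicast_one() below is authoritative for this hardware's
 * exact bit ordering:
 *
 *	u32 crc = ether_crc(6, mac);
 *	int bit = (crc >> 26) & 0x3f;
 *	if (bit >= 32)
 *		fep->fec.hthi |= 1U << (bit - 32);
 *	else
 *		fep->fec.htlo |= 1U << bit;
 */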
83
84/* Interrupt events/masks.
85*/
86#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
87#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
88#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
89#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
90#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
91#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
92#define FEC_ENET_RXF 0x02000000U /* Full frame received */
93#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
94#define FEC_ENET_MII 0x00800000U /* MII interrupt */
95#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
96
97#define FEC_ECNTRL_PINMUX 0x00000004
98#define FEC_ECNTRL_ETHER_EN 0x00000002
99#define FEC_ECNTRL_RESET 0x00000001
100
101#define FEC_RCNTRL_BC_REJ 0x00000010
102#define FEC_RCNTRL_PROM 0x00000008
103#define FEC_RCNTRL_MII_MODE 0x00000004
104#define FEC_RCNTRL_DRT 0x00000002
105#define FEC_RCNTRL_LOOP 0x00000001
106
107#define FEC_TCNTRL_FDEN 0x00000004
108#define FEC_TCNTRL_HBC 0x00000002
109#define FEC_TCNTRL_GTS 0x00000001
110
111
112/* Make MII read/write commands for the FEC.
113*/
114#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
115#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
116#define mk_mii_end 0
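/*
 * Field layout of the resulting MII management frame word: bits 31-30
 * are ST (01), bits 29-28 the opcode (read 10, write 01), bits 27-23
 * the PHY address (ORed in by the caller), bits 22-18 the register,
 * bits 17-16 the turnaround (10), bits 15-0 the data.
 */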
117
118#define FEC_MII_LOOPS 10000
119
120/*
121 * Delay to wait for FEC reset command to complete (in us)
122 */
123#define FEC_RESET_DELAY 50
124
125static int whack_reset(fec_t * fecp)
126{
127 int i;
128
129 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
130 for (i = 0; i < FEC_RESET_DELAY; i++) {
131 if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
132 return 0; /* OK */
133 udelay(1);
134 }
135
136 return -1;
137}
138
139static int do_pd_setup(struct fs_enet_private *fep)
140{
141 struct platform_device *pdev = to_platform_device(fep->dev);
142 struct resource *r;
143
144 /* Fill out IRQ field */
145	fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
146
147	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
148	fep->fec.fecp = (void *)r->start;
149
150	if (fep->fec.fecp == NULL)
151		return -EINVAL;
152
153 return 0;
154
155}
156
157#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
158#define FEC_RX_EVENT (FEC_ENET_RXF)
159#define FEC_TX_EVENT (FEC_ENET_TXF)
160#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
161 FEC_ENET_BABT | FEC_ENET_EBERR)
162
163static int setup_data(struct net_device *dev)
164{
165 struct fs_enet_private *fep = netdev_priv(dev);
166
167 if (do_pd_setup(fep) != 0)
168 return -EINVAL;
169
170 fep->fec.hthi = 0;
171 fep->fec.htlo = 0;
172
173 fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
174 fep->ev_rx = FEC_RX_EVENT;
175 fep->ev_tx = FEC_TX_EVENT;
176 fep->ev_err = FEC_ERR_EVENT_MSK;
177
178 return 0;
179}
180
181static int allocate_bd(struct net_device *dev)
182{
183 struct fs_enet_private *fep = netdev_priv(dev);
184 const struct fs_platform_info *fpi = fep->fpi;
185
186 fep->ring_base = dma_alloc_coherent(fep->dev,
187 (fpi->tx_ring + fpi->rx_ring) *
188 sizeof(cbd_t), &fep->ring_mem_addr,
189 GFP_KERNEL);
190 if (fep->ring_base == NULL)
191 return -ENOMEM;
192
193 return 0;
194}
195
196static void free_bd(struct net_device *dev)
197{
198 struct fs_enet_private *fep = netdev_priv(dev);
199 const struct fs_platform_info *fpi = fep->fpi;
200
201	if (fep->ring_base)
202 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
203 * sizeof(cbd_t),
204 fep->ring_base,
205 fep->ring_mem_addr);
206}
207
208static void cleanup_data(struct net_device *dev)
209{
210 /* nothing */
211}
212
213static void set_promiscuous_mode(struct net_device *dev)
214{
215 struct fs_enet_private *fep = netdev_priv(dev);
216 fec_t *fecp = fep->fec.fecp;
217
218 FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
219}
220
221static void set_multicast_start(struct net_device *dev)
222{
223 struct fs_enet_private *fep = netdev_priv(dev);
224
225 fep->fec.hthi = 0;
226 fep->fec.htlo = 0;
227}
228
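/*
 * Hash one address into the 64-bit group hash table: compute the
 * Ethernet CRC-32 over the six MAC octets, use CRC bits 5..1
 * (bit-reversed) as the bit index, and CRC bit 0 to pick the high
 * (hthi) or low (htlo) 32-bit half.
 */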
229static void set_multicast_one(struct net_device *dev, const u8 *mac)
230{
231 struct fs_enet_private *fep = netdev_priv(dev);
232 int temp, hash_index, i, j;
233 u32 crc, csrVal;
234 u8 byte, msb;
235
236 crc = 0xffffffff;
237 for (i = 0; i < 6; i++) {
238 byte = mac[i];
239 for (j = 0; j < 8; j++) {
240 msb = crc >> 31;
241 crc <<= 1;
242 if (msb ^ (byte & 0x1))
243 crc ^= FEC_CRC_POLY;
244 byte >>= 1;
245 }
246 }
247
248 temp = (crc & 0x3f) >> 1;
249 hash_index = ((temp & 0x01) << 4) |
250 ((temp & 0x02) << 2) |
251 ((temp & 0x04)) |
252 ((temp & 0x08) >> 2) |
253 ((temp & 0x10) >> 4);
254 csrVal = 1 << hash_index;
255 if (crc & 1)
256 fep->fec.hthi |= csrVal;
257 else
258 fep->fec.htlo |= csrVal;
259}
260
261static void set_multicast_finish(struct net_device *dev)
262{
263 struct fs_enet_private *fep = netdev_priv(dev);
264 fec_t *fecp = fep->fec.fecp;
265
266 /* if all multi or too many multicasts; just enable all */
267 if ((dev->flags & IFF_ALLMULTI) != 0 ||
268 dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
269 fep->fec.hthi = 0xffffffffU;
270 fep->fec.htlo = 0xffffffffU;
271 }
272
273 FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
274 FW(fecp, hash_table_high, fep->fec.hthi);
275 FW(fecp, hash_table_low, fep->fec.htlo);
276}
277
278static void set_multicast_list(struct net_device *dev)
279{
280 struct dev_mc_list *pmc;
281
282 if ((dev->flags & IFF_PROMISC) == 0) {
283 set_multicast_start(dev);
284 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
285 set_multicast_one(dev, pmc->dmi_addr);
286 set_multicast_finish(dev);
287 } else
288 set_promiscuous_mode(dev);
289}
290
291static void restart(struct net_device *dev)
292{
293#ifdef CONFIG_DUET
294 immap_t *immap = fs_enet_immap;
295 u32 cptr;
296#endif
297 struct fs_enet_private *fep = netdev_priv(dev);
298 fec_t *fecp = fep->fec.fecp;
299 const struct fs_platform_info *fpi = fep->fpi;
300 dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
301 int r;
302 u32 addrhi, addrlo;
303
304 r = whack_reset(fep->fec.fecp);
305 if (r != 0)
306 printk(KERN_ERR DRV_MODULE_NAME
307 ": %s FEC Reset FAILED!\n", dev->name);
308
309 /*
310 * Set station address.
311 */
312 addrhi = ((u32) dev->dev_addr[0] << 24) |
313 ((u32) dev->dev_addr[1] << 16) |
314 ((u32) dev->dev_addr[2] << 8) |
315 (u32) dev->dev_addr[3];
316 addrlo = ((u32) dev->dev_addr[4] << 24) |
317 ((u32) dev->dev_addr[5] << 16);
318 FW(fecp, addr_low, addrhi);
319 FW(fecp, addr_high, addrlo);
320
321 /*
322 * Reset all multicast.
323 */
324 FW(fecp, hash_table_high, fep->fec.hthi);
325 FW(fecp, hash_table_low, fep->fec.htlo);
326
327 /*
328 * Set maximum receive buffer size.
329 */
330 FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
331 FW(fecp, r_hash, PKT_MAXBUF_SIZE);
332
333 /* get physical address */
334 rx_bd_base_phys = fep->ring_mem_addr;
335 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
336
337 /*
338 * Set receive and transmit descriptor base.
339 */
340 FW(fecp, r_des_start, rx_bd_base_phys);
341 FW(fecp, x_des_start, tx_bd_base_phys);
342
343 fs_init_bds(dev);
344
345 /*
346 * Enable big endian and don't care about SDMA FC.
347 */
348 FW(fecp, fun_code, 0x78000000);
349
350 /*
351 * Set MII speed.
352 */
353 FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed);
354
355 /*
356 * Clear any outstanding interrupt.
357 */
358 FW(fecp, ievent, 0xffc0);
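	/*
	 * On the 8xx SIU the external IRQx and internal LEVELx sources
	 * interleave, so irq / 2 is presumably the interrupt level that
	 * IVEC wants in its top three bits.
	 */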
359 FW(fecp, ivec, (fep->interrupt / 2) << 29);
360
361
362 /*
363 * adjust to speed (only for DUET & RMII)
364 */
365#ifdef CONFIG_DUET
366 if (fpi->use_rmii) {
367 cptr = in_be32(&immap->im_cpm.cp_cptr);
368 switch (fs_get_fec_index(fpi->fs_no)) {
369 case 0:
370 cptr |= 0x100;
371 if (fep->speed == 10)
372 cptr |= 0x0000010;
373 else if (fep->speed == 100)
374 cptr &= ~0x0000010;
375 break;
376 case 1:
377 cptr |= 0x80;
378 if (fep->speed == 10)
379 cptr |= 0x0000008;
380 else if (fep->speed == 100)
381 cptr &= ~0x0000008;
382 break;
383 default:
384 BUG(); /* should never happen */
385 break;
386 }
387 out_be32(&immap->im_cpm.cp_cptr, cptr);
388 }
389#endif
390
391 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
392 /*
393 * adjust to duplex mode
394 */
395 if (fep->duplex) {
396 FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
397 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
398 } else {
399 FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
400 FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
401 }
402
403 /*
404 * Enable interrupts we wish to service.
405 */
406 FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
407 FEC_ENET_RXF | FEC_ENET_RXB);
408
409 /*
410 * And last, enable the transmit and receive processing.
411 */
412 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
413 FW(fecp, r_des_active, 0x01000000);
414}
415
416static void stop(struct net_device *dev)
417{
418 struct fs_enet_private *fep = netdev_priv(dev);
419 fec_t *fecp = fep->fec.fecp;
420 struct fs_enet_mii_bus *bus = fep->mii_bus;
421 const struct fs_mii_bus_info *bi = bus->bus_info;
422 int i;
423
424 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
425 return; /* already down */
426
427 FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
428 for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
429 i < FEC_RESET_DELAY; i++)
430 udelay(1);
431
432 if (i == FEC_RESET_DELAY)
433 printk(KERN_WARNING DRV_MODULE_NAME
434 ": %s FEC timeout on graceful transmit stop\n",
435 dev->name);
436	/*
437	 * Disable the FEC and mask all interrupts.
438	 */
439 FW(fecp, imask, 0);
440 FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
441
442 fs_cleanup_bds(dev);
443
444	/* don't fully shut down FEC0 if the shared MII bus lives on it */
445 if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) {
446 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
447 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
448 FW(fecp, ievent, FEC_ENET_MII);
449 FW(fecp, mii_speed, bus->fec.mii_speed);
450 }
451}
452
453static void pre_request_irq(struct net_device *dev, int irq)
454{
455 immap_t *immap = fs_enet_immap;
456 u32 siel;
457
458 /* SIU interrupt */
459 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
460
461 siel = in_be32(&immap->im_siu_conf.sc_siel);
462 if ((irq & 1) == 0)
463 siel |= (0x80000000 >> irq);
464 else
465 siel &= ~(0x80000000 >> (irq & ~1));
466 out_be32(&immap->im_siu_conf.sc_siel, siel);
467 }
468}
469
470static void post_free_irq(struct net_device *dev, int irq)
471{
472 /* nothing */
473}
474
475static void napi_clear_rx_event(struct net_device *dev)
476{
477 struct fs_enet_private *fep = netdev_priv(dev);
478 fec_t *fecp = fep->fec.fecp;
479
480 FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
481}
482
483static void napi_enable_rx(struct net_device *dev)
484{
485 struct fs_enet_private *fep = netdev_priv(dev);
486 fec_t *fecp = fep->fec.fecp;
487
488 FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
489}
490
491static void napi_disable_rx(struct net_device *dev)
492{
493 struct fs_enet_private *fep = netdev_priv(dev);
494 fec_t *fecp = fep->fec.fecp;
495
496 FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
497}
498
499static void rx_bd_done(struct net_device *dev)
500{
501 struct fs_enet_private *fep = netdev_priv(dev);
502 fec_t *fecp = fep->fec.fecp;
503
504 FW(fecp, r_des_active, 0x01000000);
505}
506
507static void tx_kickstart(struct net_device *dev)
508{
509 struct fs_enet_private *fep = netdev_priv(dev);
510 fec_t *fecp = fep->fec.fecp;
511
512 FW(fecp, x_des_active, 0x01000000);
513}
514
515static u32 get_int_events(struct net_device *dev)
516{
517 struct fs_enet_private *fep = netdev_priv(dev);
518 fec_t *fecp = fep->fec.fecp;
519
520 return FR(fecp, ievent) & FR(fecp, imask);
521}
522
523static void clear_int_events(struct net_device *dev, u32 int_events)
524{
525 struct fs_enet_private *fep = netdev_priv(dev);
526 fec_t *fecp = fep->fec.fecp;
527
528 FW(fecp, ievent, int_events);
529}
530
531static void ev_error(struct net_device *dev, u32 int_events)
532{
533 printk(KERN_WARNING DRV_MODULE_NAME
534 ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events);
535}
536
537static int get_regs(struct net_device *dev, void *p, int *sizep)
538{
539 struct fs_enet_private *fep = netdev_priv(dev);
540
541 if (*sizep < sizeof(fec_t))
542 return -EINVAL;
543
544 memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t));
545
546 return 0;
547}
548
549static int get_regs_len(struct net_device *dev)
550{
551 return sizeof(fec_t);
552}
553
554static void tx_restart(struct net_device *dev)
555{
556 /* nothing */
557}
558
559/*************************************************************************/
560
561const struct fs_ops fs_fec_ops = {
562 .setup_data = setup_data,
563 .cleanup_data = cleanup_data,
564 .set_multicast_list = set_multicast_list,
565 .restart = restart,
566 .stop = stop,
567 .pre_request_irq = pre_request_irq,
568 .post_free_irq = post_free_irq,
569 .napi_clear_rx_event = napi_clear_rx_event,
570 .napi_enable_rx = napi_enable_rx,
571 .napi_disable_rx = napi_disable_rx,
572 .rx_bd_done = rx_bd_done,
573 .tx_kickstart = tx_kickstart,
574 .get_int_events = get_int_events,
575 .clear_int_events = clear_int_events,
576 .ev_error = ev_error,
577 .get_regs = get_regs,
578 .get_regs_len = get_regs_len,
579 .tx_restart = tx_restart,
580 .allocate_bd = allocate_bd,
581 .free_bd = free_bd,
582};
583
584/***********************************************************************/
585
586static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
587{
588 fec_t *fecp = bus->fec.fecp;
589 int i, ret = -1;
590
591 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
592 BUG();
593
594 /* Add PHY address to register command. */
595 FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
596
597 for (i = 0; i < FEC_MII_LOOPS; i++)
598 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
599 break;
600
601 if (i < FEC_MII_LOOPS) {
602 FW(fecp, ievent, FEC_ENET_MII);
603 ret = FR(fecp, mii_data) & 0xffff;
604 }
605
606 return ret;
607}
608
609static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
610{
611 fec_t *fecp = bus->fec.fecp;
612 int i;
613
614 /* this must never happen */
615 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
616 BUG();
617
618 /* Add PHY address to register command. */
619 FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
620
621 for (i = 0; i < FEC_MII_LOOPS; i++)
622 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
623 break;
624
625 if (i < FEC_MII_LOOPS)
626 FW(fecp, ievent, FEC_ENET_MII);
627}
628
629int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
630{
631 bd_t *bd = (bd_t *)__res;
632 const struct fs_mii_bus_info *bi = bus->bus_info;
633 fec_t *fecp;
634
635 if (bi->id != 0)
636 return -1;
637
638 bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
639 bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
640 & 0x3F) << 1;
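	/*
	 * Presumably this targets the standard 2.5 MHz MDC ceiling.
	 * Worked example, assuming a 50 MHz internal bus clock:
	 * (50000000 + 4999999) / 2500000 = 21, / 2 = 10, << 1 = 20,
	 * i.e. mii_speed = 0x14.
	 */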
641
642 fecp = bus->fec.fecp;
643
644 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
645 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
646 FW(fecp, ievent, FEC_ENET_MII);
647 FW(fecp, mii_speed, bus->fec.mii_speed);
648
649 bus->mii_read = mii_read;
650 bus->mii_write = mii_write;
651
652 return 0;
653}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
new file mode 100644
index 000000000000..d8c6e9cadcf5
--- /dev/null
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -0,0 +1,524 @@
1/*
2 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#ifdef CONFIG_8xx
42#include <asm/8xx_immap.h>
43#include <asm/pgtable.h>
44#include <asm/mpc8xx.h>
45#include <asm/commproc.h>
46#endif
47
48#include "fs_enet.h"
49
50/*************************************************/
51
52#if defined(CONFIG_CPM1)
53/* for an 8xx, __raw_xxx's are sufficient */
54#define __fs_out32(addr, x) __raw_writel(x, addr)
55#define __fs_out16(addr, x) __raw_writew(x, addr)
56#define __fs_out8(addr, x) __raw_writeb(x, addr)
57#define __fs_in32(addr) __raw_readl(addr)
58#define __fs_in16(addr) __raw_readw(addr)
59#define __fs_in8(addr) __raw_readb(addr)
60#else
61/* for others play it safe */
62#define __fs_out32(addr, x) out_be32(addr, x)
63#define __fs_out16(addr, x) out_be16(addr, x)
64#define __fs_in32(addr) in_be32(addr)
65#define __fs_in16(addr)	in_be16(addr)
#define __fs_out8(addr, x)	out_8(addr, x)
#define __fs_in8(addr)	in_8(addr)
66#endif
67
68/* write, read, set bits, clear bits */
69#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
70#define R32(_p, _m) __fs_in32(&(_p)->_m)
71#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
72#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
73
74#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
75#define R16(_p, _m) __fs_in16(&(_p)->_m)
76#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
77#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
78
79#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
80#define R8(_p, _m) __fs_in8(&(_p)->_m)
81#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
82#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
83
84#define SCC_MAX_MULTICAST_ADDRS 64
85
86/*
87 * Delay to wait for SCC reset command to complete (in us)
88 */
89#define SCC_RESET_DELAY 50
90#define MAX_CR_CMD_LOOPS 10000
91
92static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
93{
94 cpm8xx_t *cpmp = &((immap_t *)fs_enet_immap)->im_cpm;
95 u32 v, ch;
96 int i = 0;
97
98 ch = fep->scc.idx << 2;
99 v = mk_cr_cmd(ch, op);
100 W16(cpmp, cp_cpcr, v | CPM_CR_FLG);
101 for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
102 if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
103 break;
104
105 if (i >= MAX_CR_CMD_LOOPS) {
106 printk(KERN_ERR "%s(): Not able to issue CPM command\n",
107 __FUNCTION__);
108 return 1;
109 }
110 return 0;
111}
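
/*
 * Typical callers: restart() issues CPM_CR_INIT_TRX after
 * reprogramming the parameter RAM, and tx_restart() issues
 * CPM_CR_RESTART_TX to resume a stalled transmitter.
 */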
112
113static int do_pd_setup(struct fs_enet_private *fep)
114{
115 struct platform_device *pdev = to_platform_device(fep->dev);
116 struct resource *r;
117
118 /* Fill out IRQ field */
119 fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
120
121 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
122 fep->scc.sccp = (void *)r->start;
123
124 if (fep->scc.sccp == NULL)
125 return -EINVAL;
126
127 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
128 fep->scc.ep = (void *)r->start;
129
130 if (fep->scc.ep == NULL)
131 return -EINVAL;
132
133 return 0;
134}
135
136#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
137#define SCC_RX_EVENT (SCCE_ENET_RXF)
138#define SCC_TX_EVENT (SCCE_ENET_TXB)
139#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
140
141static int setup_data(struct net_device *dev)
142{
143 struct fs_enet_private *fep = netdev_priv(dev);
144 const struct fs_platform_info *fpi = fep->fpi;
145
146 fep->scc.idx = fs_get_scc_index(fpi->fs_no);
147	if ((unsigned int)fep->scc.idx >= 4)	/* max 4 SCCs */
148 return -EINVAL;
149
150	if (do_pd_setup(fep) != 0)
		return -EINVAL;
151
152 fep->scc.hthi = 0;
153 fep->scc.htlo = 0;
154
155 fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
156 fep->ev_rx = SCC_RX_EVENT;
157 fep->ev_tx = SCC_TX_EVENT;
158 fep->ev_err = SCC_ERR_EVENT_MSK;
159
160 return 0;
161}
162
163static int allocate_bd(struct net_device *dev)
164{
165 struct fs_enet_private *fep = netdev_priv(dev);
166 const struct fs_platform_info *fpi = fep->fpi;
167
168 fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
169 sizeof(cbd_t), 8);
170 if (IS_DPERR(fep->ring_mem_addr))
171 return -ENOMEM;
172
173 fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr);
174
175 return 0;
176}
177
178static void free_bd(struct net_device *dev)
179{
180 struct fs_enet_private *fep = netdev_priv(dev);
181
182 if (fep->ring_base)
183 cpm_dpfree(fep->ring_mem_addr);
184}
185
186static void cleanup_data(struct net_device *dev)
187{
188 /* nothing */
189}
190
191static void set_promiscuous_mode(struct net_device *dev)
192{
193 struct fs_enet_private *fep = netdev_priv(dev);
194 scc_t *sccp = fep->scc.sccp;
195
196 S16(sccp, scc_psmr, SCC_PSMR_PRO);
197}
198
199static void set_multicast_start(struct net_device *dev)
200{
201 struct fs_enet_private *fep = netdev_priv(dev);
202 scc_enet_t *ep = fep->scc.ep;
203
204 W16(ep, sen_gaddr1, 0);
205 W16(ep, sen_gaddr2, 0);
206 W16(ep, sen_gaddr3, 0);
207 W16(ep, sen_gaddr4, 0);
208}
209
210static void set_multicast_one(struct net_device *dev, const u8 * mac)
211{
212 struct fs_enet_private *fep = netdev_priv(dev);
213 scc_enet_t *ep = fep->scc.ep;
214 u16 taddrh, taddrm, taddrl;
215
216 taddrh = ((u16) mac[5] << 8) | mac[4];
217 taddrm = ((u16) mac[3] << 8) | mac[2];
218 taddrl = ((u16) mac[1] << 8) | mac[0];
219
220 W16(ep, sen_taddrh, taddrh);
221 W16(ep, sen_taddrm, taddrm);
222 W16(ep, sen_taddrl, taddrl);
223 scc_cr_cmd(fep, CPM_CR_SET_GADDR);
224}
225
226static void set_multicast_finish(struct net_device *dev)
227{
228 struct fs_enet_private *fep = netdev_priv(dev);
229 scc_t *sccp = fep->scc.sccp;
230 scc_enet_t *ep = fep->scc.ep;
231
232 /* clear promiscuous always */
233 C16(sccp, scc_psmr, SCC_PSMR_PRO);
234
235 /* if all multi or too many multicasts; just enable all */
236 if ((dev->flags & IFF_ALLMULTI) != 0 ||
237 dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
238
239 W16(ep, sen_gaddr1, 0xffff);
240 W16(ep, sen_gaddr2, 0xffff);
241 W16(ep, sen_gaddr3, 0xffff);
242 W16(ep, sen_gaddr4, 0xffff);
243 }
244}
245
246static void set_multicast_list(struct net_device *dev)
247{
248 struct dev_mc_list *pmc;
249
250 if ((dev->flags & IFF_PROMISC) == 0) {
251 set_multicast_start(dev);
252 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
253 set_multicast_one(dev, pmc->dmi_addr);
254 set_multicast_finish(dev);
255 } else
256 set_promiscuous_mode(dev);
257}
258
259/*
260 * This function is called to start or restart the SCC during a link
261 * change. This only happens when switching between half and full
262 * duplex.
263 */
264static void restart(struct net_device *dev)
265{
266 struct fs_enet_private *fep = netdev_priv(dev);
267 scc_t *sccp = fep->scc.sccp;
268 scc_enet_t *ep = fep->scc.ep;
269 const struct fs_platform_info *fpi = fep->fpi;
270 u16 paddrh, paddrm, paddrl;
271 const unsigned char *mac;
272 int i;
273
274 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
275
276 /* clear everything (slow & steady does it) */
277 for (i = 0; i < sizeof(*ep); i++)
278 __fs_out8((char *)ep + i, 0);
279
280 /* point to bds */
281 W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
282 W16(ep, sen_genscc.scc_tbase,
283 fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
284
285 /* Initialize function code registers for big-endian.
286 */
287 W8(ep, sen_genscc.scc_rfcr, SCC_EB);
288 W8(ep, sen_genscc.scc_tfcr, SCC_EB);
289
290 /* Set maximum bytes per receive buffer.
291 * This appears to be an Ethernet frame size, not the buffer
292 * fragment size. It must be a multiple of four.
293 */
294 W16(ep, sen_genscc.scc_mrblr, 0x5f0);
295
296 /* Set CRC preset and mask.
297 */
298 W32(ep, sen_cpres, 0xffffffff);
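	/* 0xdebb20e3 is presumably the standard Ethernet CRC-32 residue,
	 * the checker state left behind by a frame with a correct FCS */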
299 W32(ep, sen_cmask, 0xdebb20e3);
300
301 W32(ep, sen_crcec, 0); /* CRC Error counter */
302 W32(ep, sen_alec, 0); /* alignment error counter */
303 W32(ep, sen_disfc, 0); /* discard frame counter */
304
305 W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
306 W16(ep, sen_retlim, 15); /* Retry limit threshold */
307
308 W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
309
310 W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
311
312 W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
313 W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
314
315 /* Clear hash tables.
316 */
317 W16(ep, sen_gaddr1, 0);
318 W16(ep, sen_gaddr2, 0);
319 W16(ep, sen_gaddr3, 0);
320 W16(ep, sen_gaddr4, 0);
321 W16(ep, sen_iaddr1, 0);
322 W16(ep, sen_iaddr2, 0);
323 W16(ep, sen_iaddr3, 0);
324 W16(ep, sen_iaddr4, 0);
325
326 /* set address
327 */
328 mac = dev->dev_addr;
329 paddrh = ((u16) mac[5] << 8) | mac[4];
330 paddrm = ((u16) mac[3] << 8) | mac[2];
331 paddrl = ((u16) mac[1] << 8) | mac[0];
332
333 W16(ep, sen_paddrh, paddrh);
334 W16(ep, sen_paddrm, paddrm);
335 W16(ep, sen_paddrl, paddrl);
336
337 W16(ep, sen_pper, 0);
338 W16(ep, sen_taddrl, 0);
339 W16(ep, sen_taddrm, 0);
340 W16(ep, sen_taddrh, 0);
341
342 fs_init_bds(dev);
343
344 scc_cr_cmd(fep, CPM_CR_INIT_TRX);
345
346 W16(sccp, scc_scce, 0xffff);
347
348 /* Enable interrupts we wish to service.
349 */
350 W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
351
352 /* Set GSMR_H to enable all normal operating modes.
353 * Set GSMR_L to enable Ethernet to MC68160.
354 */
355 W32(sccp, scc_gsmrh, 0);
356 W32(sccp, scc_gsmrl,
357 SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
358 SCC_GSMRL_MODE_ENET);
359
360 /* Set sync/delimiters.
361 */
362 W16(sccp, scc_dsr, 0xd555);
363
364 /* Set processing mode. Use Ethernet CRC, catch broadcast, and
365 * start frame search 22 bit times after RENA.
366 */
367 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
368
369 /* Set full duplex mode if needed */
370 if (fep->duplex)
371 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
372
373 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
374}
375
376static void stop(struct net_device *dev)
377{
378 struct fs_enet_private *fep = netdev_priv(dev);
379 scc_t *sccp = fep->scc.sccp;
380 int i;
381
382 for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
383 udelay(1);
384
385 if (i == SCC_RESET_DELAY)
386 printk(KERN_WARNING DRV_MODULE_NAME
387 ": %s SCC timeout on graceful transmit stop\n",
388 dev->name);
389
390 W16(sccp, scc_sccm, 0);
391 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
392
393 fs_cleanup_bds(dev);
394}
395
396static void pre_request_irq(struct net_device *dev, int irq)
397{
398 immap_t *immap = fs_enet_immap;
399 u32 siel;
400
401 /* SIU interrupt */
402 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
403
404 siel = in_be32(&immap->im_siu_conf.sc_siel);
405 if ((irq & 1) == 0)
406 siel |= (0x80000000 >> irq);
407 else
408 siel &= ~(0x80000000 >> (irq & ~1));
409 out_be32(&immap->im_siu_conf.sc_siel, siel);
410 }
411}
412
413static void post_free_irq(struct net_device *dev, int irq)
414{
415 /* nothing */
416}
417
418static void napi_clear_rx_event(struct net_device *dev)
419{
420 struct fs_enet_private *fep = netdev_priv(dev);
421 scc_t *sccp = fep->scc.sccp;
422
423 W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
424}
425
426static void napi_enable_rx(struct net_device *dev)
427{
428 struct fs_enet_private *fep = netdev_priv(dev);
429 scc_t *sccp = fep->scc.sccp;
430
431 S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
432}
433
434static void napi_disable_rx(struct net_device *dev)
435{
436 struct fs_enet_private *fep = netdev_priv(dev);
437 scc_t *sccp = fep->scc.sccp;
438
439 C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
440}
441
442static void rx_bd_done(struct net_device *dev)
443{
444 /* nothing */
445}
446
447static void tx_kickstart(struct net_device *dev)
448{
449 /* nothing */
450}
451
452static u32 get_int_events(struct net_device *dev)
453{
454 struct fs_enet_private *fep = netdev_priv(dev);
455 scc_t *sccp = fep->scc.sccp;
456
457 return (u32) R16(sccp, scc_scce);
458}
459
460static void clear_int_events(struct net_device *dev, u32 int_events)
461{
462 struct fs_enet_private *fep = netdev_priv(dev);
463 scc_t *sccp = fep->scc.sccp;
464
465 W16(sccp, scc_scce, int_events & 0xffff);
466}
467
468static void ev_error(struct net_device *dev, u32 int_events)
469{
470 printk(KERN_WARNING DRV_MODULE_NAME
471 ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
472}
473
474static int get_regs(struct net_device *dev, void *p, int *sizep)
475{
476 struct fs_enet_private *fep = netdev_priv(dev);
477
478 if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t))
479 return -EINVAL;
480
481 memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
482 p = (char *)p + sizeof(scc_t);
483
484 memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t));
485
486 return 0;
487}
488
489static int get_regs_len(struct net_device *dev)
490{
491 return sizeof(scc_t) + sizeof(scc_enet_t);
492}
493
494static void tx_restart(struct net_device *dev)
495{
496 struct fs_enet_private *fep = netdev_priv(dev);
497
498 scc_cr_cmd(fep, CPM_CR_RESTART_TX);
499}
500
501/*************************************************************************/
502
503const struct fs_ops fs_scc_ops = {
504 .setup_data = setup_data,
505 .cleanup_data = cleanup_data,
506 .set_multicast_list = set_multicast_list,
507 .restart = restart,
508 .stop = stop,
509 .pre_request_irq = pre_request_irq,
510 .post_free_irq = post_free_irq,
511 .napi_clear_rx_event = napi_clear_rx_event,
512 .napi_enable_rx = napi_enable_rx,
513 .napi_disable_rx = napi_disable_rx,
514 .rx_bd_done = rx_bd_done,
515 .tx_kickstart = tx_kickstart,
516 .get_int_events = get_int_events,
517 .clear_int_events = clear_int_events,
518 .ev_error = ev_error,
519 .get_regs = get_regs,
520 .get_regs_len = get_regs_len,
521 .tx_restart = tx_restart,
522 .allocate_bd = allocate_bd,
523 .free_bd = free_bd,
524};
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
new file mode 100644
index 000000000000..24a5e2e23d18
--- /dev/null
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -0,0 +1,405 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37
38#include <asm/pgtable.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42#include "fs_enet.h"
43
44#ifdef CONFIG_8xx
45static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
46{
47 immap_t *im = (immap_t *)fs_enet_immap;
48 void *dir, *dat, *ppar;
49 int adv;
50 u8 msk;
51
52 switch (port) {
53 case fsiop_porta:
54 dir = &im->im_ioport.iop_padir;
55 dat = &im->im_ioport.iop_padat;
56 ppar = &im->im_ioport.iop_papar;
57 break;
58
59 case fsiop_portb:
60 dir = &im->im_cpm.cp_pbdir;
61 dat = &im->im_cpm.cp_pbdat;
62 ppar = &im->im_cpm.cp_pbpar;
63 break;
64
65 case fsiop_portc:
66 dir = &im->im_ioport.iop_pcdir;
67 dat = &im->im_ioport.iop_pcdat;
68 ppar = &im->im_ioport.iop_pcpar;
69 break;
70
71 case fsiop_portd:
72 dir = &im->im_ioport.iop_pddir;
73 dat = &im->im_ioport.iop_pddat;
74 ppar = &im->im_ioport.iop_pdpar;
75 break;
76
77 case fsiop_porte:
78 dir = &im->im_cpm.cp_pedir;
79 dat = &im->im_cpm.cp_pedat;
80 ppar = &im->im_cpm.cp_pepar;
81 break;
82
83 default:
84 printk(KERN_ERR DRV_MODULE_NAME
85	       ": Illegal port value %d!\n", port);
86 return -EINVAL;
87 }
88
89 adv = bit >> 3;
90 dir = (char *)dir + adv;
91 dat = (char *)dat + adv;
92 ppar = (char *)ppar + adv;
93
94 msk = 1 << (7 - (bit & 7));
95 if ((in_8(ppar) & msk) != 0) {
96 printk(KERN_ERR DRV_MODULE_NAME
97	       ": pin %d on port %d is not general purpose!\n", bit, port);
98 return -EINVAL;
99 }
100
101 *dirp = dir;
102 *datp = dat;
103 *mskp = msk;
104
105 return 0;
106}
107#endif
108
109#ifdef CONFIG_8260
110static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
111{
112 iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
113 void *dir, *dat, *ppar;
114 int adv;
115 u8 msk;
116
117 switch (port) {
118 case fsiop_porta:
119 dir = &io->iop_pdira;
120 dat = &io->iop_pdata;
121 ppar = &io->iop_ppara;
122 break;
123
124 case fsiop_portb:
125 dir = &io->iop_pdirb;
126 dat = &io->iop_pdatb;
127 ppar = &io->iop_pparb;
128 break;
129
130 case fsiop_portc:
131 dir = &io->iop_pdirc;
132 dat = &io->iop_pdatc;
133 ppar = &io->iop_pparc;
134 break;
135
136 case fsiop_portd:
137 dir = &io->iop_pdird;
138 dat = &io->iop_pdatd;
139 ppar = &io->iop_ppard;
140 break;
141
142 default:
143 printk(KERN_ERR DRV_MODULE_NAME
144	       ": Illegal port value %d!\n", port);
145 return -EINVAL;
146 }
147
148 adv = bit >> 3;
149 dir = (char *)dir + adv;
150 dat = (char *)dat + adv;
151 ppar = (char *)ppar + adv;
152
153 msk = 1 << (7 - (bit & 7));
154 if ((in_8(ppar) & msk) != 0) {
155 printk(KERN_ERR DRV_MODULE_NAME
156	       ": pin %d on port %d is not general purpose!\n", bit, port);
157 return -EINVAL;
158 }
159
160 *dirp = dir;
161 *datp = dat;
162 *mskp = msk;
163
164 return 0;
165}
166#endif
167
168static inline void bb_set(u8 *p, u8 m)
169{
170 out_8(p, in_8(p) | m);
171}
172
173static inline void bb_clr(u8 *p, u8 m)
174{
175 out_8(p, in_8(p) & ~m);
176}
177
178static inline int bb_read(u8 *p, u8 m)
179{
180 return (in_8(p) & m) != 0;
181}
182
183static inline void mdio_active(struct fs_enet_mii_bus *bus)
184{
185 bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
186}
187
188static inline void mdio_tristate(struct fs_enet_mii_bus *bus)
189{
190 bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
191}
192
193static inline int mdio_read(struct fs_enet_mii_bus *bus)
194{
195 return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
196}
197
198static inline void mdio(struct fs_enet_mii_bus *bus, int what)
199{
200 if (what)
201 bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
202 else
203 bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
204}
205
206static inline void mdc(struct fs_enet_mii_bus *bus, int what)
207{
208 if (what)
209 bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
210 else
211 bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
212}
213
214static inline void mii_delay(struct fs_enet_mii_bus *bus)
215{
216 udelay(bus->bus_info->i.bitbang.delay);
217}
218
219/* Utility to send the preamble, address, and register (common to read and write). */
220static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
221{
222 int j;
223
224 /*
225 * Send a 32 bit preamble ('1's) with an extra '1' bit for good measure.
226	 * The IEEE spec says the preamble is optional for the PHY. The AMD
227	 * 79C874 requires one after power-up and one after an MII communications
228 * error. This means that we are doing more preambles than we need,
229 * but it is safer and will be much more robust.
230 */
231
232 mdio_active(bus);
233 mdio(bus, 1);
234 for (j = 0; j < 32; j++) {
235 mdc(bus, 0);
236 mii_delay(bus);
237 mdc(bus, 1);
238 mii_delay(bus);
239 }
240
241	/* send the start bit (01) and the read opcode (10) or write opcode (01) */
242 mdc(bus, 0);
243 mdio(bus, 0);
244 mii_delay(bus);
245 mdc(bus, 1);
246 mii_delay(bus);
247 mdc(bus, 0);
248 mdio(bus, 1);
249 mii_delay(bus);
250 mdc(bus, 1);
251 mii_delay(bus);
252 mdc(bus, 0);
253 mdio(bus, read);
254 mii_delay(bus);
255 mdc(bus, 1);
256 mii_delay(bus);
257 mdc(bus, 0);
258 mdio(bus, !read);
259 mii_delay(bus);
260 mdc(bus, 1);
261 mii_delay(bus);
262
263 /* send the PHY address */
264 for (j = 0; j < 5; j++) {
265 mdc(bus, 0);
266 mdio(bus, (addr & 0x10) != 0);
267 mii_delay(bus);
268 mdc(bus, 1);
269 mii_delay(bus);
270 addr <<= 1;
271 }
272
273 /* send the register address */
274 for (j = 0; j < 5; j++) {
275 mdc(bus, 0);
276 mdio(bus, (reg & 0x10) != 0);
277 mii_delay(bus);
278 mdc(bus, 1);
279 mii_delay(bus);
280 reg <<= 1;
281 }
282}
283
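/*
 * With bitbang_pre() above, mii_read()/mii_write() below produce the
 * usual IEEE 802.3 clause 22 management frame:
 *   <32x1 preamble><ST 01><OP: read 10 / write 01><5-bit PHY address>
 *   <5-bit register><turnaround><16 data bits>
 */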
284static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
285{
286 u16 rdreg;
287 int ret, j;
288 u8 addr = phy_id & 0xff;
289 u8 reg = location & 0xff;
290
291 bitbang_pre(bus, 1, addr, reg);
292
293 /* tri-state our MDIO I/O pin so we can read */
294 mdc(bus, 0);
295 mdio_tristate(bus);
296 mii_delay(bus);
297 mdc(bus, 1);
298 mii_delay(bus);
299
300 /* check the turnaround bit: the PHY should be driving it to zero */
301 if (mdio_read(bus) != 0) {
302 /* PHY didn't drive TA low */
303 for (j = 0; j < 32; j++) {
304 mdc(bus, 0);
305 mii_delay(bus);
306 mdc(bus, 1);
307 mii_delay(bus);
308 }
309 ret = -1;
310 goto out;
311 }
312
313 mdc(bus, 0);
314 mii_delay(bus);
315
316 /* read 16 bits of register data, MSB first */
317 rdreg = 0;
318 for (j = 0; j < 16; j++) {
319 mdc(bus, 1);
320 mii_delay(bus);
321 rdreg <<= 1;
322 rdreg |= mdio_read(bus);
323 mdc(bus, 0);
324 mii_delay(bus);
325 }
326
327 mdc(bus, 1);
328 mii_delay(bus);
329 mdc(bus, 0);
330 mii_delay(bus);
331 mdc(bus, 1);
332 mii_delay(bus);
333
334 ret = rdreg;
335out:
336 return ret;
337}
338
339static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
340{
341 int j;
342 u8 addr = phy_id & 0xff;
343 u8 reg = location & 0xff;
344 u16 value = val & 0xffff;
345
346 bitbang_pre(bus, 0, addr, reg);
347
348 /* send the turnaround (10) */
349 mdc(bus, 0);
350 mdio(bus, 1);
351 mii_delay(bus);
352 mdc(bus, 1);
353 mii_delay(bus);
354 mdc(bus, 0);
355 mdio(bus, 0);
356 mii_delay(bus);
357 mdc(bus, 1);
358 mii_delay(bus);
359
360 /* write 16 bits of register data, MSB first */
361 for (j = 0; j < 16; j++) {
362 mdc(bus, 0);
363 mdio(bus, (value & 0x8000) != 0);
364 mii_delay(bus);
365 mdc(bus, 1);
366 mii_delay(bus);
367 value <<= 1;
368 }
369
370 /*
371 * Tri-state the MDIO line.
372 */
373 mdio_tristate(bus);
374 mdc(bus, 0);
375 mii_delay(bus);
376 mdc(bus, 1);
377 mii_delay(bus);
378}
379
380int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus)
381{
382 const struct fs_mii_bus_info *bi = bus->bus_info;
383 int r;
384
385 r = bitbang_prep_bit(&bus->bitbang.mdio_dir,
386 &bus->bitbang.mdio_dat,
387 &bus->bitbang.mdio_msk,
388 bi->i.bitbang.mdio_port,
389 bi->i.bitbang.mdio_bit);
390 if (r != 0)
391 return r;
392
393 r = bitbang_prep_bit(&bus->bitbang.mdc_dir,
394 &bus->bitbang.mdc_dat,
395 &bus->bitbang.mdc_msk,
396 bi->i.bitbang.mdc_port,
397 bi->i.bitbang.mdc_bit);
398 if (r != 0)
399 return r;
400
401 bus->mii_read = mii_read;
402 bus->mii_write = mii_write;
403
404 return 0;
405}
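
A minimal sketch of the bus_info a board port might hand to
fs_mii_bitbang_init() (the field names come from the code above; the
fsmii_bitbang method value and the pin assignments are hypothetical):

static const struct fs_mii_bus_info demo_bitbang_bus = {
	.method = fsmii_bitbang,		/* assumed enum value */
	.id = 0,
	.i.bitbang = {
		.mdio_port = fsiop_portd,	/* hypothetical pins */
		.mdio_bit = 4,
		.mdc_port = fsiop_portd,
		.mdc_bit = 5,
		.delay = 1,			/* us per half clock */
	},
};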
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
new file mode 100644
index 000000000000..b3e192d612e5
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fixed.c
@@ -0,0 +1,92 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37
38#include <asm/pgtable.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42#include "fs_enet.h"
43
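/*
 * Canned PHY registers for the fixed-link case; by my reading of the
 * MII spec: reg 0 (BMCR) 0x3100 = autoneg enable, 100 Mbit, full
 * duplex; reg 1 (BMSR) 0x786d = 10/100 capable, autoneg complete,
 * link up; reg 4 (ANAR) 0x01e1 = advertise 10/100 half/full.
 * Reg 5 (LPA) is served from bus->fixed.lpa instead (see mii_read).
 */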
44static const u16 mii_regs[7] = {
45 0x3100,
46 0x786d,
47 0x0fff,
48 0x0fff,
49 0x01e1,
50 0x45e1,
51 0x0003,
52};
53
54static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
55{
56 int ret = 0;
57
58 if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
59 return -1;
60
61 if (location != 5)
62 ret = mii_regs[location];
63 else
64 ret = bus->fixed.lpa;
65
66 return ret;
67}
68
69static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
70{
71 /* do nothing */
72}
73
74int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
75{
76 const struct fs_mii_bus_info *bi = bus->bus_info;
77
78 bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
79
80 /* if speed is fixed at 10Mb, remove 100Mb modes */
81 if (bi->i.fixed.speed == 10)
82 bus->fixed.lpa &= ~LPA_100;
83
84 /* if duplex is half, remove full duplex modes */
85 if (bi->i.fixed.duplex == 0)
86 bus->fixed.lpa &= ~LPA_DUPLEX;
87
88 bus->mii_read = mii_read;
89 bus->mii_write = mii_write;
90
91 return 0;
92}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ae5a2ed3b264..962580f2c4ab 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -81,7 +81,7 @@
81#include <linux/if_vlan.h> 81#include <linux/if_vlan.h>
82#include <linux/spinlock.h> 82#include <linux/spinlock.h>
83#include <linux/mm.h> 83#include <linux/mm.h>
84#include <linux/device.h> 84#include <linux/platform_device.h>
85#include <linux/ip.h> 85#include <linux/ip.h>
86#include <linux/tcp.h> 86#include <linux/tcp.h>
87#include <linux/udp.h> 87#include <linux/udp.h>
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 1eca1dbca7f1..5a74d3d3dbe1 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -33,6 +33,7 @@
33#include <linux/mm.h> 33#include <linux/mm.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/version.h> 35#include <linux/version.h>
36#include <linux/platform_device.h>
36#include <asm/ocp.h> 37#include <asm/ocp.h>
37#include <linux/crc32.h> 38#include <linux/crc32.h>
38#include <linux/mii.h> 39#include <linux/mii.h>
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 85d6dc005be0..3e9accf137e7 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -390,10 +390,8 @@ static void ax_changedmtu(struct mkiss *ax)
390 "MTU change cancelled.\n", 390 "MTU change cancelled.\n",
391 ax->dev->name); 391 ax->dev->name);
392 dev->mtu = ax->mtu; 392 dev->mtu = ax->mtu;
393 if (xbuff != NULL) 393 kfree(xbuff);
394 kfree(xbuff); 394 kfree(rbuff);
395 if (rbuff != NULL)
396 kfree(rbuff);
397 return; 395 return;
398 } 396 }
399 397
diff --git a/drivers/net/ibm_emac/Makefile b/drivers/net/ibm_emac/Makefile
index 7f583a333c24..f98ddf0e807a 100644
--- a/drivers/net/ibm_emac/Makefile
+++ b/drivers/net/ibm_emac/Makefile
@@ -1,12 +1,11 @@
1# 1#
2# Makefile for the IBM PPC4xx EMAC controllers 2# Makefile for the PowerPC 4xx on-chip ethernet driver
3# 3#
4 4
5obj-$(CONFIG_IBM_EMAC) += ibm_emac.o 5obj-$(CONFIG_IBM_EMAC) += ibm_emac.o
6 6
7ibm_emac-objs := ibm_emac_mal.o ibm_emac_core.o ibm_emac_phy.o 7ibm_emac-objs := ibm_emac_mal.o ibm_emac_core.o ibm_emac_phy.o
8 8ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += ibm_emac_zmii.o
9# Only need this if you want to see additional debug messages 9ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += ibm_emac_rgmii.o
10ifeq ($(CONFIG_IBM_EMAC_ERRMSG), y) 10ibm_emac-$(CONFIG_IBM_EMAC_TAH) += ibm_emac_tah.o
11ibm_emac-objs += ibm_emac_debug.o 11ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += ibm_emac_debug.o
12endif
diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h
index 15d5a0e82862..644edbff4f94 100644
--- a/drivers/net/ibm_emac/ibm_emac.h
+++ b/drivers/net/ibm_emac/ibm_emac.h
@@ -1,110 +1,143 @@
1/* 1/*
2 * ibm_emac.h 2 * drivers/net/ibm_emac/ibm_emac.h
3 * 3 *
 4 * Register definitions for PowerPC 4xx on-chip ethernet controller
4 * 5 *
5 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies.
6 * June, 2002 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
7 * 8 *
8 * Copyright 2002 MontaVista Softare Inc. 9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * Armin Kuster <akuster@mvista.com>
12 * Copyright 2002-2004 MontaVista Software Inc.
9 * 13 *
10 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 16 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 17 * option) any later version.
18 *
14 */ 19 */
20#ifndef __IBM_EMAC_H_
21#define __IBM_EMAC_H_
22
23#include <linux/config.h>
24#include <linux/types.h>
25
26/* This is a simple check to prevent use of this driver on non-tested SoCs */
27#if !defined(CONFIG_405GP) && !defined(CONFIG_405GPR) && !defined(CONFIG_405EP) && \
28 !defined(CONFIG_440GP) && !defined(CONFIG_440GX) && !defined(CONFIG_440SP) && \
29 !defined(CONFIG_440EP) && !defined(CONFIG_NP405H) && !defined(CONFIG_440SPE) && \
30 !defined(CONFIG_440GR)
31#error "Unknown SoC. Please, check chip user manual and make sure EMAC defines are OK"
32#endif
15 33
16#ifndef _IBM_EMAC_H_ 34/* EMAC registers Write Access rules */
17#define _IBM_EMAC_H_ 35struct emac_regs {
18/* General defines needed for the driver */ 36 u32 mr0; /* special */
37 u32 mr1; /* Reset */
38 u32 tmr0; /* special */
39 u32 tmr1; /* special */
40 u32 rmr; /* Reset */
41 u32 isr; /* Always */
42 u32 iser; /* Reset */
43 u32 iahr; /* Reset, R, T */
44 u32 ialr; /* Reset, R, T */
45 u32 vtpid; /* Reset, R, T */
46 u32 vtci; /* Reset, R, T */
47 u32 ptr; /* Reset, T */
48 u32 iaht1; /* Reset, R */
49 u32 iaht2; /* Reset, R */
50 u32 iaht3; /* Reset, R */
51 u32 iaht4; /* Reset, R */
52 u32 gaht1; /* Reset, R */
53 u32 gaht2; /* Reset, R */
54 u32 gaht3; /* Reset, R */
55 u32 gaht4; /* Reset, R */
56 u32 lsah;
57 u32 lsal;
58 u32 ipgvr; /* Reset, T */
59 u32 stacr; /* special */
60 u32 trtr; /* special */
61 u32 rwmr; /* Reset */
62 u32 octx;
63 u32 ocrx;
64 u32 ipcr;
65};
66
67#if !defined(CONFIG_IBM_EMAC4)
68#define EMAC_ETHTOOL_REGS_VER 0
69#define EMAC_ETHTOOL_REGS_SIZE (sizeof(struct emac_regs) - sizeof(u32))
70#else
71#define EMAC_ETHTOOL_REGS_VER 1
72#define EMAC_ETHTOOL_REGS_SIZE sizeof(struct emac_regs)
73#endif
19 74
20/* Emac */ 75/* EMACx_MR0 */
21typedef struct emac_regs { 76#define EMAC_MR0_RXI 0x80000000
22 u32 em0mr0; 77#define EMAC_MR0_TXI 0x40000000
23 u32 em0mr1; 78#define EMAC_MR0_SRST 0x20000000
24 u32 em0tmr0; 79#define EMAC_MR0_TXE 0x10000000
25 u32 em0tmr1; 80#define EMAC_MR0_RXE 0x08000000
26 u32 em0rmr; 81#define EMAC_MR0_WKE 0x04000000
27 u32 em0isr;
28 u32 em0iser;
29 u32 em0iahr;
30 u32 em0ialr;
31 u32 em0vtpid;
32 u32 em0vtci;
33 u32 em0ptr;
34 u32 em0iaht1;
35 u32 em0iaht2;
36 u32 em0iaht3;
37 u32 em0iaht4;
38 u32 em0gaht1;
39 u32 em0gaht2;
40 u32 em0gaht3;
41 u32 em0gaht4;
42 u32 em0lsah;
43 u32 em0lsal;
44 u32 em0ipgvr;
45 u32 em0stacr;
46 u32 em0trtr;
47 u32 em0rwmr;
48} emac_t;
49 82
50/* MODE REG 0 */ 83/* EMACx_MR1 */
51#define EMAC_M0_RXI 0x80000000 84#define EMAC_MR1_FDE 0x80000000
52#define EMAC_M0_TXI 0x40000000 85#define EMAC_MR1_ILE 0x40000000
53#define EMAC_M0_SRST 0x20000000 86#define EMAC_MR1_VLE 0x20000000
54#define EMAC_M0_TXE 0x10000000 87#define EMAC_MR1_EIFC 0x10000000
55#define EMAC_M0_RXE 0x08000000 88#define EMAC_MR1_APP 0x08000000
56#define EMAC_M0_WKE 0x04000000 89#define EMAC_MR1_IST 0x01000000
57 90
58/* MODE Reg 1 */ 91#define EMAC_MR1_MF_MASK 0x00c00000
59#define EMAC_M1_FDE 0x80000000 92#define EMAC_MR1_MF_10 0x00000000
60#define EMAC_M1_ILE 0x40000000 93#define EMAC_MR1_MF_100 0x00400000
61#define EMAC_M1_VLE 0x20000000 94#if !defined(CONFIG_IBM_EMAC4)
62#define EMAC_M1_EIFC 0x10000000 95#define EMAC_MR1_MF_1000 0x00000000
63#define EMAC_M1_APP 0x08000000 96#define EMAC_MR1_MF_1000GPCS 0x00000000
64#define EMAC_M1_AEMI 0x02000000 97#define EMAC_MR1_MF_IPPA(id) 0x00000000
65#define EMAC_M1_IST 0x01000000 98#else
66#define EMAC_M1_MF_1000GPCS 0x00c00000 /* Internal GPCS */ 99#define EMAC_MR1_MF_1000 0x00800000
67#define EMAC_M1_MF_1000MBPS 0x00800000 /* External GPCS */ 100#define EMAC_MR1_MF_1000GPCS 0x00c00000
68#define EMAC_M1_MF_100MBPS 0x00400000 101#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6)
69#define EMAC_M1_RFS_16K 0x00280000 /* 000 for 512 byte */ 102#endif
70#define EMAC_M1_TR 0x00008000 103
71#ifdef CONFIG_IBM_EMAC4 104#define EMAC_TX_FIFO_SIZE 2048
72#define EMAC_M1_RFS_8K 0x00200000 105
73#define EMAC_M1_RFS_4K 0x00180000 106#if !defined(CONFIG_IBM_EMAC4)
74#define EMAC_M1_RFS_2K 0x00100000 107#define EMAC_MR1_RFS_4K 0x00300000
75#define EMAC_M1_RFS_1K 0x00080000 108#define EMAC_MR1_RFS_16K 0x00000000
76#define EMAC_M1_TX_FIFO_16K 0x00050000 /* 0's for 512 byte */ 109#define EMAC_RX_FIFO_SIZE(gige) 4096
77#define EMAC_M1_TX_FIFO_8K 0x00040000 110#define EMAC_MR1_TFS_2K 0x00080000
78#define EMAC_M1_TX_FIFO_4K 0x00030000 111#define EMAC_MR1_TR0_MULT 0x00008000
79#define EMAC_M1_TX_FIFO_2K 0x00020000 112#define EMAC_MR1_JPSM 0x00000000
80#define EMAC_M1_TX_FIFO_1K 0x00010000 113#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
81#define EMAC_M1_TX_TR 0x00008000 114#else
82#define EMAC_M1_TX_MWSW 0x00001000 /* 0 wait for status */ 115#define EMAC_MR1_RFS_4K 0x00180000
83#define EMAC_M1_JUMBO_ENABLE 0x00000800 /* Upt to 9Kr status */ 116#define EMAC_MR1_RFS_16K 0x00280000
84#define EMAC_M1_OPB_CLK_66 0x00000008 /* 66Mhz */ 117#define EMAC_RX_FIFO_SIZE(gige) ((gige) ? 16384 : 4096)
85#define EMAC_M1_OPB_CLK_83 0x00000010 /* 83Mhz */ 118#define EMAC_MR1_TFS_2K 0x00020000
86#define EMAC_M1_OPB_CLK_100 0x00000018 /* 100Mhz */ 119#define EMAC_MR1_TR 0x00008000
87#define EMAC_M1_OPB_CLK_100P 0x00000020 /* 100Mhz+ */ 120#define EMAC_MR1_MWSW_001 0x00001000
88#else /* CONFIG_IBM_EMAC4 */ 121#define EMAC_MR1_JPSM 0x00000800
89#define EMAC_M1_RFS_4K 0x00300000 /* ~4k for 512 byte */ 122#define EMAC_MR1_OBCI_MASK 0x00000038
90#define EMAC_M1_RFS_2K 0x00200000 123#define EMAC_MR1_OBCI_50 0x00000000
91#define EMAC_M1_RFS_1K 0x00100000 124#define EMAC_MR1_OBCI_66 0x00000008
92#define EMAC_M1_TX_FIFO_2K 0x00080000 /* 0's for 512 byte */ 125#define EMAC_MR1_OBCI_83 0x00000010
93#define EMAC_M1_TX_FIFO_1K 0x00040000 126#define EMAC_MR1_OBCI_100 0x00000018
94#define EMAC_M1_TR0_DEPEND 0x00010000 /* 0'x for single packet */ 127#define EMAC_MR1_OBCI_100P 0x00000020
95#define EMAC_M1_TR1_DEPEND 0x00004000 128#define EMAC_MR1_OBCI(freq) ((freq) <= 50 ? EMAC_MR1_OBCI_50 : \
96#define EMAC_M1_TR1_MULTI 0x00002000 129 (freq) <= 66 ? EMAC_MR1_OBCI_66 : \
97#define EMAC_M1_JUMBO_ENABLE 0x00001000 130 (freq) <= 83 ? EMAC_MR1_OBCI_83 : \
98#endif /* CONFIG_IBM_EMAC4 */ 131 (freq) <= 100 ? EMAC_MR1_OBCI_100 : EMAC_MR1_OBCI_100P)
99#define EMAC_M1_BASE (EMAC_M1_TX_FIFO_2K | \ 132#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR | \
100 EMAC_M1_APP | \ 133 EMAC_MR1_MWSW_001 | EMAC_MR1_OBCI(opb))
101 EMAC_M1_TR | EMAC_M1_VLE) 134#endif
102 135
103/* Transmit Mode Register 0 */ 136/* EMACx_TMR0 */
104#define EMAC_TMR0_GNP0 0x80000000 137#define EMAC_TMR0_GNP 0x80000000
105#define EMAC_TMR0_GNP1 0x40000000 138#if !defined(CONFIG_IBM_EMAC4)
106#define EMAC_TMR0_GNPD 0x20000000 139#define EMAC_TMR0_DEFAULT 0x00000000
107#define EMAC_TMR0_FC 0x10000000 140#else
108#define EMAC_TMR0_TFAE_2_32 0x00000001 141#define EMAC_TMR0_TFAE_2_32 0x00000001
109#define EMAC_TMR0_TFAE_4_64 0x00000002 142#define EMAC_TMR0_TFAE_4_64 0x00000002
110#define EMAC_TMR0_TFAE_8_128 0x00000003 143#define EMAC_TMR0_TFAE_8_128 0x00000003
@@ -112,14 +145,36 @@ typedef struct emac_regs {
112#define EMAC_TMR0_TFAE_32_512 0x00000005 145#define EMAC_TMR0_TFAE_32_512 0x00000005
113#define EMAC_TMR0_TFAE_64_1024 0x00000006 146#define EMAC_TMR0_TFAE_64_1024 0x00000006
114#define EMAC_TMR0_TFAE_128_2048 0x00000007 147#define EMAC_TMR0_TFAE_128_2048 0x00000007
148#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_2_32
149#endif
150#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT)
151
152/* EMACx_TMR1 */
153
154/* IBM manuals are not very clear here.
155 * This is my interpretation of how things are. --ebs
156 */
157#if defined(CONFIG_40x)
158#define EMAC_FIFO_ENTRY_SIZE 8
159#define EMAC_MAL_BURST_SIZE (16 * 4)
160#else
161#define EMAC_FIFO_ENTRY_SIZE 16
162#define EMAC_MAL_BURST_SIZE (64 * 4)
163#endif
164
165#if !defined(CONFIG_IBM_EMAC4)
166#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16))
167#else
168#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14))
169#endif
115 170
116/* Receive Mode Register */ 171/* EMACx_RMR */
117#define EMAC_RMR_SP 0x80000000 172#define EMAC_RMR_SP 0x80000000
118#define EMAC_RMR_SFCS 0x40000000 173#define EMAC_RMR_SFCS 0x40000000
119#define EMAC_RMR_ARRP 0x20000000 174#define EMAC_RMR_RRP 0x20000000
120#define EMAC_RMR_ARP 0x10000000 175#define EMAC_RMR_RFP 0x10000000
121#define EMAC_RMR_AROP 0x08000000 176#define EMAC_RMR_ROP 0x08000000
122#define EMAC_RMR_ARPI 0x04000000 177#define EMAC_RMR_RPIR 0x04000000
123#define EMAC_RMR_PPP 0x02000000 178#define EMAC_RMR_PPP 0x02000000
124#define EMAC_RMR_PME 0x01000000 179#define EMAC_RMR_PME 0x01000000
125#define EMAC_RMR_PMME 0x00800000 180#define EMAC_RMR_PMME 0x00800000
@@ -127,6 +182,9 @@ typedef struct emac_regs {
127#define EMAC_RMR_MIAE 0x00200000 182#define EMAC_RMR_MIAE 0x00200000
128#define EMAC_RMR_BAE 0x00100000 183#define EMAC_RMR_BAE 0x00100000
129#define EMAC_RMR_MAE 0x00080000 184#define EMAC_RMR_MAE 0x00080000
185#if !defined(CONFIG_IBM_EMAC4)
186#define EMAC_RMR_BASE 0x00000000
187#else
130#define EMAC_RMR_RFAF_2_32 0x00000001 188#define EMAC_RMR_RFAF_2_32 0x00000001
131#define EMAC_RMR_RFAF_4_64 0x00000002 189#define EMAC_RMR_RFAF_4_64 0x00000002
132#define EMAC_RMR_RFAF_8_128 0x00000003 190#define EMAC_RMR_RFAF_8_128 0x00000003
@@ -134,9 +192,21 @@ typedef struct emac_regs {
134#define EMAC_RMR_RFAF_32_512 0x00000005 192#define EMAC_RMR_RFAF_32_512 0x00000005
135#define EMAC_RMR_RFAF_64_1024 0x00000006 193#define EMAC_RMR_RFAF_64_1024 0x00000006
136#define EMAC_RMR_RFAF_128_2048 0x00000007 194#define EMAC_RMR_RFAF_128_2048 0x00000007
137#define EMAC_RMR_BASE (EMAC_RMR_IAE | EMAC_RMR_BAE) 195#define EMAC_RMR_BASE EMAC_RMR_RFAF_128_2048
196#endif
138 197
139/* Interrupt Status & enable Regs */ 198/* EMACx_ISR & EMACx_ISER */
199#if !defined(CONFIG_IBM_EMAC4)
200#define EMAC_ISR_TXPE 0x00000000
201#define EMAC_ISR_RXPE 0x00000000
202#define EMAC_ISR_TXUE 0x00000000
203#define EMAC_ISR_RXOE 0x00000000
204#else
205#define EMAC_ISR_TXPE 0x20000000
206#define EMAC_ISR_RXPE 0x10000000
207#define EMAC_ISR_TXUE 0x08000000
208#define EMAC_ISR_RXOE 0x04000000
209#endif
140#define EMAC_ISR_OVR 0x02000000 210#define EMAC_ISR_OVR 0x02000000
141#define EMAC_ISR_PP 0x01000000 211#define EMAC_ISR_PP 0x01000000
142#define EMAC_ISR_BP 0x00800000 212#define EMAC_ISR_BP 0x00800000
@@ -147,53 +217,81 @@ typedef struct emac_regs {
147#define EMAC_ISR_PTLE 0x00040000 217#define EMAC_ISR_PTLE 0x00040000
148#define EMAC_ISR_ORE 0x00020000 218#define EMAC_ISR_ORE 0x00020000
149#define EMAC_ISR_IRE 0x00010000 219#define EMAC_ISR_IRE 0x00010000
150#define EMAC_ISR_DBDM 0x00000200 220#define EMAC_ISR_SQE 0x00000080
151#define EMAC_ISR_DB0 0x00000100 221#define EMAC_ISR_TE 0x00000040
152#define EMAC_ISR_SE0 0x00000080
153#define EMAC_ISR_TE0 0x00000040
154#define EMAC_ISR_DB1 0x00000020
155#define EMAC_ISR_SE1 0x00000010
156#define EMAC_ISR_TE1 0x00000008
157#define EMAC_ISR_MOS 0x00000002 222#define EMAC_ISR_MOS 0x00000002
158#define EMAC_ISR_MOF 0x00000001 223#define EMAC_ISR_MOF 0x00000001
159 224
160/* STA CONTROL REG */ 225/* EMACx_STACR */
226#define EMAC_STACR_PHYD_MASK 0xffff
227#define EMAC_STACR_PHYD_SHIFT 16
161#define EMAC_STACR_OC 0x00008000 228#define EMAC_STACR_OC 0x00008000
162#define EMAC_STACR_PHYE 0x00004000 229#define EMAC_STACR_PHYE 0x00004000
163#define EMAC_STACR_WRITE 0x00002000 230#define EMAC_STACR_STAC_MASK 0x00003000
164#define EMAC_STACR_READ 0x00001000 231#define EMAC_STACR_STAC_READ 0x00001000
165#define EMAC_STACR_CLK_83MHZ 0x00000800 /* 0's for 50Mhz */ 232#define EMAC_STACR_STAC_WRITE 0x00002000
166#define EMAC_STACR_CLK_66MHZ 0x00000400 233#if !defined(CONFIG_IBM_EMAC4)
167#define EMAC_STACR_CLK_100MHZ 0x00000C00 234#define EMAC_STACR_OPBC_MASK 0x00000C00
235#define EMAC_STACR_OPBC_50 0x00000000
236#define EMAC_STACR_OPBC_66 0x00000400
237#define EMAC_STACR_OPBC_83 0x00000800
238#define EMAC_STACR_OPBC_100 0x00000C00
239#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \
240 (freq) <= 66 ? EMAC_STACR_OPBC_66 : \
241 (freq) <= 83 ? EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100)
242#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb)
243#else
244#define EMAC_STACR_BASE(opb) 0x00000000
245#endif
246#define EMAC_STACR_PCDA_MASK 0x1f
247#define EMAC_STACR_PCDA_SHIFT 5
248#define EMAC_STACR_PRA_MASK 0x1f
168 249
169/* Transmit Request Threshold Register */ 250/*
170#define EMAC_TRTR_1600 0x18000000 /* 0's for 64 Bytes */ 251 * For the 440SPe, AMCC inexplicably changed the polarity of
171#define EMAC_TRTR_1024 0x0f000000 252 * the "operation complete" bit in the MII control register.
172#define EMAC_TRTR_512 0x07000000 253 */
173#define EMAC_TRTR_256 0x03000000 254#if defined(CONFIG_440SPE)
174#define EMAC_TRTR_192 0x10000000 255static inline int emac_phy_done(u32 stacr)
175#define EMAC_TRTR_128 0x01000000 256{
257 return !(stacr & EMAC_STACR_OC);
258};
259#define EMAC_STACR_START EMAC_STACR_OC
176 260
261#else /* CONFIG_440SPE */
262static inline int emac_phy_done(u32 stacr)
263{
264 return stacr & EMAC_STACR_OC;
265};
266#define EMAC_STACR_START 0
267#endif /* !CONFIG_440SPE */
268
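The two variants above let callers poll for completion uniformly: emac_phy_done() hides the inverted sense of OC on 440SPe, and EMAC_STACR_START supplies whatever bit (if any) must be set to kick off an operation. A minimal sketch of the intended pattern, assuming p points at the EMAC register block and cmd is a fully formed STACR command word (the real, bounded loops are in __emac_mdio_read()/__emac_mdio_write() below):

	out_be32(&p->stacr, cmd | EMAC_STACR_START);
	while (!emac_phy_done(in_be32(&p->stacr)))
		udelay(1);	/* real callers also bound this loop */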
269/* EMACx_TRTR */
270#if !defined(CONFIG_IBM_EMAC4)
271#define EMAC_TRTR_SHIFT 27
272#else
273#define EMAC_TRTR_SHIFT 24
274#endif
275#define EMAC_TRTR(size) ((((size) >> 6) - 1) << EMAC_TRTR_SHIFT)
276
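The threshold is encoded in 64-byte units, minus one, shifted into the TRTR field. A quick check against the fixed constants this macro replaces (EMAC4 case, shift 24): EMAC_TRTR(1024) = ((1024 >> 6) - 1) << 24 = 15 << 24 = 0x0f000000, i.e. exactly the value the old EMAC_TRTR_1024 constant carried.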
277/* EMACx_RWMR */
278#if !defined(CONFIG_IBM_EMAC4)
279#define EMAC_RWMR(l,h) (((l) << 23) | ( ((h) & 0x1ff) << 7))
280#else
281#define EMAC_RWMR(l,h) (((l) << 22) | ( ((h) & 0x3ff) << 6))
282#endif
283
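EMAC_RWMR() packs the RX FIFO low- and high-water marks (counted in FIFO entries) into one register, with field positions shifted between EMAC generations. A sketch with assumed sizes that are not taken from this patch (a 4096-byte RX FIFO and 16-byte FIFO entries): low = 4096/8/16 = 32 and high = 4096/4/16 = 64, so on EMAC4 EMAC_RWMR(32, 64) = (32 << 22) | (64 << 6) = 0x08001000.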
284/* EMAC specific TX descriptor control fields (write access) */
177#define EMAC_TX_CTRL_GFCS 0x0200 285#define EMAC_TX_CTRL_GFCS 0x0200
178#define EMAC_TX_CTRL_GP 0x0100 286#define EMAC_TX_CTRL_GP 0x0100
179#define EMAC_TX_CTRL_ISA 0x0080 287#define EMAC_TX_CTRL_ISA 0x0080
180#define EMAC_TX_CTRL_RSA 0x0040 288#define EMAC_TX_CTRL_RSA 0x0040
181#define EMAC_TX_CTRL_IVT 0x0020 289#define EMAC_TX_CTRL_IVT 0x0020
182#define EMAC_TX_CTRL_RVT 0x0010 290#define EMAC_TX_CTRL_RVT 0x0010
183#define EMAC_TX_CTRL_TAH_CSUM 0x000e /* TAH only */ 291#define EMAC_TX_CTRL_TAH_CSUM 0x000e
184#define EMAC_TX_CTRL_TAH_SEG4 0x000a /* TAH only */
185#define EMAC_TX_CTRL_TAH_SEG3 0x0008 /* TAH only */
186#define EMAC_TX_CTRL_TAH_SEG2 0x0006 /* TAH only */
187#define EMAC_TX_CTRL_TAH_SEG1 0x0004 /* TAH only */
188#define EMAC_TX_CTRL_TAH_SEG0 0x0002 /* TAH only */
189#define EMAC_TX_CTRL_TAH_DIS 0x0000 /* TAH only */
190
191#define EMAC_TX_CTRL_DFLT ( \
192 MAL_TX_CTRL_INTR | EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP )
193 292
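For context, the old transmit path kept later in this diff composes a descriptor control word from these bits; roughly as follows, where last_fragment stands in for the driver's real fragment bookkeeping:

	u16 ctrl = EMAC_TX_CTRL_DFLT;		/* MAL intr + generate FCS/padding */
	if (last_fragment)
		ctrl |= MAL_TX_CTRL_LAST;
	if (skb->ip_summed == CHECKSUM_HW)	/* TAH checksum offload */
		ctrl |= EMAC_TX_CTRL_TAH_CSUM;
	fep->tx_desc[slot].ctrl = ctrl;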
194/* madmal transmit status / Control bits */ 293/* EMAC specific TX descriptor status fields (read access) */
195#define EMAC_TX_ST_BFCS 0x0200 294#define EMAC_TX_ST_BFCS 0x0200
196#define EMAC_TX_ST_BPP 0x0100
197#define EMAC_TX_ST_LCS 0x0080 295#define EMAC_TX_ST_LCS 0x0080
198#define EMAC_TX_ST_ED 0x0040 296#define EMAC_TX_ST_ED 0x0040
199#define EMAC_TX_ST_EC 0x0020 297#define EMAC_TX_ST_EC 0x0020
@@ -202,8 +300,16 @@ typedef struct emac_regs {
202#define EMAC_TX_ST_SC 0x0004 300#define EMAC_TX_ST_SC 0x0004
203#define EMAC_TX_ST_UR 0x0002 301#define EMAC_TX_ST_UR 0x0002
204#define EMAC_TX_ST_SQE 0x0001 302#define EMAC_TX_ST_SQE 0x0001
303#if !defined(CONFIG_IBM_EMAC_TAH)
304#define EMAC_IS_BAD_TX(v) ((v) & (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
305 EMAC_TX_ST_EC | EMAC_TX_ST_LC | \
306 EMAC_TX_ST_MC | EMAC_TX_ST_UR))
307#else
308#define EMAC_IS_BAD_TX(v) ((v) & (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
309 EMAC_TX_ST_EC | EMAC_TX_ST_LC))
310#endif
205 311
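A sketch of how a TX completion handler is expected to consult this predicate, assuming the error-stats field names mirror the ethtool strings listed in ibm_emac_core.c below:

	u16 ctrl = dev->tx_desc[slot].ctrl;
	if (unlikely(EMAC_IS_BAD_TX(ctrl))) {
		++dev->estats.tx_bd_errors;	/* then classify by individual bit */
	}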
206/* madmal receive status / Control bits */ 312/* EMAC specific RX descriptor status fields (read access) */
207#define EMAC_RX_ST_OE 0x0200 313#define EMAC_RX_ST_OE 0x0200
208#define EMAC_RX_ST_PP 0x0100 314#define EMAC_RX_ST_PP 0x0100
209#define EMAC_RX_ST_BP 0x0080 315#define EMAC_RX_ST_BP 0x0080
@@ -214,54 +320,10 @@ typedef struct emac_regs {
214#define EMAC_RX_ST_PTL 0x0004 320#define EMAC_RX_ST_PTL 0x0004
215#define EMAC_RX_ST_ORE 0x0002 321#define EMAC_RX_ST_ORE 0x0002
216#define EMAC_RX_ST_IRE 0x0001 322#define EMAC_RX_ST_IRE 0x0001
217#define EMAC_BAD_RX_PACKET 0x02ff 323#define EMAC_RX_TAH_BAD_CSUM 0x0003
218#define EMAC_CSUM_VER_ERROR 0x0003 324#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \
219 325 EMAC_RX_ST_RP | EMAC_RX_ST_SE | \
220/* identify a bad rx packet dependent on emac features */ 326 EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \
221#ifdef CONFIG_IBM_EMAC4 327 EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \
222#define EMAC_IS_BAD_RX_PACKET(desc) \ 328 EMAC_RX_ST_IRE )
223 (((desc & (EMAC_BAD_RX_PACKET & ~EMAC_CSUM_VER_ERROR)) || \ 329#endif /* __IBM_EMAC_H_ */
224 ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_ORE) || \
225 ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_IRE)))
226#else
227#define EMAC_IS_BAD_RX_PACKET(desc) \
228 (desc & EMAC_BAD_RX_PACKET)
229#endif
230
231/* SoC implementation specific EMAC register defaults */
232#if defined(CONFIG_440GP)
233#define EMAC_RWMR_DEFAULT 0x80009000
234#define EMAC_TMR0_DEFAULT 0x00000000
235#define EMAC_TMR1_DEFAULT 0xf8640000
236#elif defined(CONFIG_440GX)
237#define EMAC_RWMR_DEFAULT 0x1000a200
238#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_2_32
239#define EMAC_TMR1_DEFAULT 0xa00f0000
240#elif defined(CONFIG_440SP)
241#define EMAC_RWMR_DEFAULT 0x08002000
242#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_128_2048
243#define EMAC_TMR1_DEFAULT 0xf8200000
244#else
245#define EMAC_RWMR_DEFAULT 0x0f002000
246#define EMAC_TMR0_DEFAULT 0x00000000
247#define EMAC_TMR1_DEFAULT 0x380f0000
248#endif /* CONFIG_440GP */
249
250/* Revision specific EMAC register defaults */
251#ifdef CONFIG_IBM_EMAC4
252#define EMAC_M1_DEFAULT (EMAC_M1_BASE | \
253 EMAC_M1_OPB_CLK_83 | \
254 EMAC_M1_TX_MWSW)
255#define EMAC_RMR_DEFAULT (EMAC_RMR_BASE | \
256 EMAC_RMR_RFAF_128_2048)
257#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP0 | \
258 EMAC_TMR0_DEFAULT)
259#define EMAC_TRTR_DEFAULT EMAC_TRTR_1024
260#else /* !CONFIG_IBM_EMAC4 */
261#define EMAC_M1_DEFAULT EMAC_M1_BASE
262#define EMAC_RMR_DEFAULT EMAC_RMR_BASE
263#define EMAC_TMR0_XMIT EMAC_TMR0_GNP0
264#define EMAC_TRTR_DEFAULT EMAC_TRTR_1600
265#endif /* CONFIG_IBM_EMAC4 */
266
267#endif
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 14e9b6315f20..eb7d69478715 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1,13 +1,14 @@
1/* 1/*
2 * ibm_emac_core.c 2 * drivers/net/ibm_emac/ibm_emac_core.c
3 * 3 *
4 * Ethernet driver for the built in ethernet on the IBM 4xx PowerPC 4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 * processors.
6 *
7 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
8 * 5 *
9 * Based on original work by 6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
10 * 8 *
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
11 * Armin Kuster <akuster@mvista.com> 12 * Armin Kuster <akuster@mvista.com>
12 * Johnnie Peters <jpeters@mvista.com> 13 * Johnnie Peters <jpeters@mvista.com>
13 * 14 *
@@ -15,29 +16,24 @@
15 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version. 18 * option) any later version.
18 * TODO 19 *
19 * - Check for races in the "remove" code path
20 * - Add some Power Management to the MAC and the PHY
 21 * - Audit remainder of non-rewritten code (--BenH)
 22 * - Cleanup message display using msglevel mechanism
23 * - Address all errata
24 * - Audit all register update paths to ensure they
25 * are being written post soft reset if required.
26 */ 20 */
21
22#include <linux/config.h>
27#include <linux/module.h> 23#include <linux/module.h>
28#include <linux/kernel.h> 24#include <linux/kernel.h>
29#include <linux/sched.h> 25#include <linux/sched.h>
30#include <linux/string.h> 26#include <linux/string.h>
31#include <linux/timer.h>
32#include <linux/ptrace.h>
33#include <linux/errno.h> 27#include <linux/errno.h>
34#include <linux/ioport.h>
35#include <linux/slab.h>
36#include <linux/interrupt.h> 28#include <linux/interrupt.h>
37#include <linux/delay.h> 29#include <linux/delay.h>
38#include <linux/init.h> 30#include <linux/init.h>
39#include <linux/types.h> 31#include <linux/types.h>
40#include <linux/dma-mapping.h> 32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/crc32.h>
41#include <linux/ethtool.h> 37#include <linux/ethtool.h>
42#include <linux/mii.h> 38#include <linux/mii.h>
43#include <linux/bitops.h> 39#include <linux/bitops.h>
@@ -45,1691 +41,1895 @@
45#include <asm/processor.h> 41#include <asm/processor.h>
46#include <asm/io.h> 42#include <asm/io.h>
47#include <asm/dma.h> 43#include <asm/dma.h>
48#include <asm/irq.h>
49#include <asm/uaccess.h> 44#include <asm/uaccess.h>
50#include <asm/ocp.h> 45#include <asm/ocp.h>
51 46
52#include <linux/netdevice.h>
53#include <linux/etherdevice.h>
54#include <linux/skbuff.h>
55#include <linux/crc32.h>
56
57#include "ibm_emac_core.h" 47#include "ibm_emac_core.h"
58 48#include "ibm_emac_debug.h"
59//#define MDIO_DEBUG(fmt) printk fmt
60#define MDIO_DEBUG(fmt)
61
62//#define LINK_DEBUG(fmt) printk fmt
63#define LINK_DEBUG(fmt)
64
65//#define PKT_DEBUG(fmt) printk fmt
66#define PKT_DEBUG(fmt)
67
68#define DRV_NAME "emac"
69#define DRV_VERSION "2.0"
70#define DRV_AUTHOR "Benjamin Herrenschmidt <benh@kernel.crashing.org>"
71#define DRV_DESC "IBM EMAC Ethernet driver"
72 49
73/* 50/*
74 * When mdio_idx >= 0, contains a list of emac ocp_devs 51 * Lack of dma_unmap_???? calls is intentional.
75 * that have had their initialization deferred until the 52 *
76 * common MDIO controller has been initialized. 53 * API-correct usage requires additional support state information to be
54 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
55 * EMAC design (e.g. TX buffer passed from network stack can be split into
56 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
57 * maintaining such information will add additional overhead.
58 * Current DMA API implementation for 4xx processors only ensures cache coherency
59 * and dma_unmap_???? routines are empty and are likely to stay this way.
60 * I decided to omit dma_unmap_??? calls because I don't want to add additional
61 * complexity just for the sake of following some abstract API, when it doesn't
 62 * add any real benefit to the driver. I understand that this decision may be
 63 * controversial, but I really tried to make code API-correct and efficient
64 * at the same time and didn't come up with code I liked :(. --ebs
77 */ 65 */
78LIST_HEAD(emac_init_list);
79 66
80MODULE_AUTHOR(DRV_AUTHOR); 67#define DRV_NAME "emac"
68#define DRV_VERSION "3.53"
69#define DRV_DESC "PPC 4xx OCP EMAC driver"
70
81MODULE_DESCRIPTION(DRV_DESC); 71MODULE_DESCRIPTION(DRV_DESC);
72MODULE_AUTHOR
73 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
82MODULE_LICENSE("GPL"); 74MODULE_LICENSE("GPL");
83 75
84static int skb_res = SKB_RES; 76/* minimum number of free TX descriptors required to wake up TX process */
85module_param(skb_res, int, 0444); 77#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
86MODULE_PARM_DESC(skb_res, "Amount of data to reserve on skb buffs\n"
87 "The 405 handles a misaligned IP header fine but\n"
88 "this can help if you are routing to a tunnel or a\n"
89 "device that needs aligned data. 0..2");
90
91#define RGMII_PRIV(ocpdev) ((struct ibm_ocp_rgmii*)ocp_get_drvdata(ocpdev))
92 78
 93static unsigned int rgmii_enable[] = { 79/* If packet size is less than this number, we allocate a small skb and copy the
 94 RGMII_RTBI, 80 * packet contents into it instead of just sending the original big skb up
95 RGMII_RGMII, 81 */
96 RGMII_TBI, 82#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
97 RGMII_GMII
98};
99
100static unsigned int rgmii_speed_mask[] = {
101 RGMII_MII2_SPDMASK,
102 RGMII_MII3_SPDMASK
103};
104 83
105static unsigned int rgmii_speed100[] = { 84/* Since multiple EMACs share MDIO lines in various ways, we need
106 RGMII_MII2_100MB, 85 * to avoid re-using the same PHY ID in cases where the arch didn't
 107 RGMII_MII3_100MB 86 * set up precise phy_map entries
108}; 87 */
88static u32 busy_phy_map;
89
90#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
91 (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
 92/* 405EP has an "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 93 * with the PHY RX clock problem.
 94 * 440EP/440GR has a saner SDR0_MFR register implementation than 440GX, which
95 * also allows controlling each EMAC clock
96 */
97static inline void EMAC_RX_CLK_TX(int idx)
98{
99 unsigned long flags;
100 local_irq_save(flags);
109 101
110static unsigned int rgmii_speed1000[] = { 102#if defined(CONFIG_405EP)
111 RGMII_MII2_1000MB, 103 mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
112 RGMII_MII3_1000MB 104#else /* CONFIG_440EP || CONFIG_440GR */
113}; 105 SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
106#endif
114 107
115#define ZMII_PRIV(ocpdev) ((struct ibm_ocp_zmii*)ocp_get_drvdata(ocpdev)) 108 local_irq_restore(flags);
109}
116 110
117static unsigned int zmii_enable[][4] = { 111static inline void EMAC_RX_CLK_DEFAULT(int idx)
118 {ZMII_SMII0, ZMII_RMII0, ZMII_MII0, 112{
119 ~(ZMII_MDI1 | ZMII_MDI2 | ZMII_MDI3)}, 113 unsigned long flags;
120 {ZMII_SMII1, ZMII_RMII1, ZMII_MII1, 114 local_irq_save(flags);
121 ~(ZMII_MDI0 | ZMII_MDI2 | ZMII_MDI3)},
122 {ZMII_SMII2, ZMII_RMII2, ZMII_MII2,
123 ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI3)},
124 {ZMII_SMII3, ZMII_RMII3, ZMII_MII3, ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI2)}
125};
126 115
127static unsigned int mdi_enable[] = { 116#if defined(CONFIG_405EP)
128 ZMII_MDI0, 117 mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
129 ZMII_MDI1, 118#else /* CONFIG_440EP */
130 ZMII_MDI2, 119 SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
131 ZMII_MDI3 120#endif
132};
133 121
134static unsigned int zmii_speed = 0x0; 122 local_irq_restore(flags);
135static unsigned int zmii_speed100[] = { 123}
136 ZMII_MII0_100MB, 124#else
137 ZMII_MII1_100MB, 125#define EMAC_RX_CLK_TX(idx) ((void)0)
138 ZMII_MII2_100MB, 126#define EMAC_RX_CLK_DEFAULT(idx) ((void)0)
139 ZMII_MII3_100MB 127#endif
140};
141 128
142/* Since multiple EMACs share MDIO lines in various ways, we need 129#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
143 * to avoid re-using the same PHY ID in cases where the arch didn't 130/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 144 * set up precise phy_map entries 131 * unfortunately this is less flexible than the 440EP case, because it's a global
132 * setting for all EMACs, therefore we do this clock trick only during probe.
145 */ 133 */
146static u32 busy_phy_map = 0; 134#define EMAC_CLK_INTERNAL SDR_WRITE(DCRN_SDR_MFR, \
135 SDR_READ(DCRN_SDR_MFR) | 0x08000000)
136#define EMAC_CLK_EXTERNAL SDR_WRITE(DCRN_SDR_MFR, \
137 SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
138#else
139#define EMAC_CLK_INTERNAL ((void)0)
140#define EMAC_CLK_EXTERNAL ((void)0)
141#endif
147 142
 148/* If EMACs share a common MDIO device, this points to it */ 143/* I don't want to litter the system log with timeout errors
 149static struct net_device *mdio_ndev = NULL; 144 * when we have a brain-damaged PHY.
145 */
146static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
147 const char *error)
148{
149#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
150 DBG("%d: %s" NL, dev->def->index, error);
151#else
152 if (net_ratelimit())
153 printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
154#endif
155}
150 156
151struct emac_def_dev { 157/* PHY polling intervals */
152 struct list_head link; 158#define PHY_POLL_LINK_ON HZ
153 struct ocp_device *ocpdev; 159#define PHY_POLL_LINK_OFF (HZ / 5)
154 struct ibm_ocp_mal *mal; 160
161/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
162static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
163 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
164 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
165 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
166 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
167 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
168 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
169 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
170 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
171 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
172 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
173 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
174 "tx_bd_excessive_collisions", "tx_bd_late_collision",
175 "tx_bd_multple_collisions", "tx_bd_single_collision",
176 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
177 "tx_errors"
155}; 178};
156 179
157static struct net_device_stats *emac_stats(struct net_device *dev) 180static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
181static void emac_clean_tx_ring(struct ocp_enet_private *dev);
182
183static inline int emac_phy_supports_gige(int phy_mode)
158{ 184{
159 struct ocp_enet_private *fep = dev->priv; 185 return phy_mode == PHY_MODE_GMII ||
160 return &fep->stats; 186 phy_mode == PHY_MODE_RGMII ||
161}; 187 phy_mode == PHY_MODE_TBI ||
188 phy_mode == PHY_MODE_RTBI;
189}
162 190
163static int 191static inline int emac_phy_gpcs(int phy_mode)
164emac_init_rgmii(struct ocp_device *rgmii_dev, int input, int phy_mode)
165{ 192{
166 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(rgmii_dev); 193 return phy_mode == PHY_MODE_TBI ||
167 const char *mode_name[] = { "RTBI", "RGMII", "TBI", "GMII" }; 194 phy_mode == PHY_MODE_RTBI;
168 int mode = -1; 195}
169 196
170 if (!rgmii) { 197static inline void emac_tx_enable(struct ocp_enet_private *dev)
171 rgmii = kmalloc(sizeof(struct ibm_ocp_rgmii), GFP_KERNEL); 198{
199 struct emac_regs *p = dev->emacp;
200 unsigned long flags;
201 u32 r;
172 202
173 if (rgmii == NULL) { 203 local_irq_save(flags);
174 printk(KERN_ERR
175 "rgmii%d: Out of memory allocating RGMII structure!\n",
176 rgmii_dev->def->index);
177 return -ENOMEM;
178 }
179 204
180 memset(rgmii, 0, sizeof(*rgmii)); 205 DBG("%d: tx_enable" NL, dev->def->index);
181 206
182 rgmii->base = 207 r = in_be32(&p->mr0);
183 (struct rgmii_regs *)ioremap(rgmii_dev->def->paddr, 208 if (!(r & EMAC_MR0_TXE))
184 sizeof(*rgmii->base)); 209 out_be32(&p->mr0, r | EMAC_MR0_TXE);
185 if (rgmii->base == NULL) { 210 local_irq_restore(flags);
186 printk(KERN_ERR 211}
187 "rgmii%d: Cannot ioremap bridge registers!\n",
188 rgmii_dev->def->index);
189 212
190 kfree(rgmii); 213static void emac_tx_disable(struct ocp_enet_private *dev)
191 return -ENOMEM; 214{
192 } 215 struct emac_regs *p = dev->emacp;
193 ocp_set_drvdata(rgmii_dev, rgmii); 216 unsigned long flags;
194 } 217 u32 r;
195 218
196 if (phy_mode) { 219 local_irq_save(flags);
197 switch (phy_mode) {
198 case PHY_MODE_GMII:
199 mode = GMII;
200 break;
201 case PHY_MODE_TBI:
202 mode = TBI;
203 break;
204 case PHY_MODE_RTBI:
205 mode = RTBI;
206 break;
207 case PHY_MODE_RGMII:
208 default:
209 mode = RGMII;
210 }
211 rgmii->base->fer &= ~RGMII_FER_MASK(input);
212 rgmii->base->fer |= rgmii_enable[mode] << (4 * input);
213 } else {
214 switch ((rgmii->base->fer & RGMII_FER_MASK(input)) >> (4 *
215 input)) {
216 case RGMII_RTBI:
217 mode = RTBI;
218 break;
219 case RGMII_RGMII:
220 mode = RGMII;
221 break;
222 case RGMII_TBI:
223 mode = TBI;
224 break;
225 case RGMII_GMII:
226 mode = GMII;
227 }
228 }
229 220
230 /* Set mode to RGMII if nothing valid is detected */ 221 DBG("%d: tx_disable" NL, dev->def->index);
231 if (mode < 0)
232 mode = RGMII;
233 222
234 printk(KERN_NOTICE "rgmii%d: input %d in %s mode\n", 223 r = in_be32(&p->mr0);
235 rgmii_dev->def->index, input, mode_name[mode]); 224 if (r & EMAC_MR0_TXE) {
225 int n = 300;
226 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
227 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
228 --n;
229 if (unlikely(!n))
230 emac_report_timeout_error(dev, "TX disable timeout");
231 }
232 local_irq_restore(flags);
233}
236 234
237 rgmii->mode[input] = mode; 235static void emac_rx_enable(struct ocp_enet_private *dev)
238 rgmii->users++; 236{
237 struct emac_regs *p = dev->emacp;
238 unsigned long flags;
239 u32 r;
239 240
240 return 0; 241 local_irq_save(flags);
242 if (unlikely(dev->commac.rx_stopped))
243 goto out;
244
245 DBG("%d: rx_enable" NL, dev->def->index);
246
247 r = in_be32(&p->mr0);
248 if (!(r & EMAC_MR0_RXE)) {
249 if (unlikely(!(r & EMAC_MR0_RXI))) {
250 /* Wait if previous async disable is still in progress */
251 int n = 100;
252 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
253 --n;
254 if (unlikely(!n))
255 emac_report_timeout_error(dev,
256 "RX disable timeout");
257 }
258 out_be32(&p->mr0, r | EMAC_MR0_RXE);
259 }
260 out:
261 local_irq_restore(flags);
241} 262}
242 263
243static void 264static void emac_rx_disable(struct ocp_enet_private *dev)
244emac_rgmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
245{ 265{
246 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev); 266 struct emac_regs *p = dev->emacp;
247 unsigned int rgmii_speed; 267 unsigned long flags;
248 268 u32 r;
249 rgmii_speed = in_be32(&rgmii->base->ssr);
250 269
251 rgmii_speed &= ~rgmii_speed_mask[input]; 270 local_irq_save(flags);
252 271
253 if (speed == 1000) 272 DBG("%d: rx_disable" NL, dev->def->index);
254 rgmii_speed |= rgmii_speed1000[input];
255 else if (speed == 100)
256 rgmii_speed |= rgmii_speed100[input];
257 273
258 out_be32(&rgmii->base->ssr, rgmii_speed); 274 r = in_be32(&p->mr0);
275 if (r & EMAC_MR0_RXE) {
276 int n = 300;
277 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
278 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
279 --n;
280 if (unlikely(!n))
281 emac_report_timeout_error(dev, "RX disable timeout");
282 }
283 local_irq_restore(flags);
259} 284}
260 285
261static void emac_close_rgmii(struct ocp_device *ocpdev) 286static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
262{ 287{
263 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev); 288 struct emac_regs *p = dev->emacp;
264 BUG_ON(!rgmii || rgmii->users == 0); 289 unsigned long flags;
290 u32 r;
265 291
266 if (!--rgmii->users) { 292 local_irq_save(flags);
267 ocp_set_drvdata(ocpdev, NULL); 293
268 iounmap((void *)rgmii->base); 294 DBG("%d: rx_disable_async" NL, dev->def->index);
269 kfree(rgmii); 295
270 } 296 r = in_be32(&p->mr0);
297 if (r & EMAC_MR0_RXE)
298 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
299 local_irq_restore(flags);
271} 300}
272 301
273static int emac_init_zmii(struct ocp_device *zmii_dev, int input, int phy_mode) 302static int emac_reset(struct ocp_enet_private *dev)
274{ 303{
275 struct ibm_ocp_zmii *zmii = ZMII_PRIV(zmii_dev); 304 struct emac_regs *p = dev->emacp;
276 const char *mode_name[] = { "SMII", "RMII", "MII" }; 305 unsigned long flags;
277 int mode = -1; 306 int n = 20;
278 307
279 if (!zmii) { 308 DBG("%d: reset" NL, dev->def->index);
280 zmii = kmalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
281 if (zmii == NULL) {
282 printk(KERN_ERR
283 "zmii%d: Out of memory allocating ZMII structure!\n",
284 zmii_dev->def->index);
285 return -ENOMEM;
286 }
287 memset(zmii, 0, sizeof(*zmii));
288 309
289 zmii->base = 310 local_irq_save(flags);
290 (struct zmii_regs *)ioremap(zmii_dev->def->paddr,
291 sizeof(*zmii->base));
292 if (zmii->base == NULL) {
293 printk(KERN_ERR
294 "zmii%d: Cannot ioremap bridge registers!\n",
295 zmii_dev->def->index);
296 311
297 kfree(zmii); 312 if (!dev->reset_failed) {
 298 return -ENOMEM; 313 /* A 40x erratum suggests stopping the RX channel before reset;
 299 } 314 * we stop TX as well
300 ocp_set_drvdata(zmii_dev, zmii); 315 */
316 emac_rx_disable(dev);
317 emac_tx_disable(dev);
301 } 318 }
302 319
303 if (phy_mode) { 320 out_be32(&p->mr0, EMAC_MR0_SRST);
304 switch (phy_mode) { 321 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
305 case PHY_MODE_MII: 322 --n;
306 mode = MII; 323 local_irq_restore(flags);
307 break; 324
308 case PHY_MODE_RMII: 325 if (n) {
309 mode = RMII; 326 dev->reset_failed = 0;
310 break; 327 return 0;
311 case PHY_MODE_SMII:
312 default:
313 mode = SMII;
314 }
315 zmii->base->fer &= ~ZMII_FER_MASK(input);
316 zmii->base->fer |= zmii_enable[input][mode];
317 } else { 328 } else {
318 switch ((zmii->base->fer & ZMII_FER_MASK(input)) << (4 * input)) { 329 emac_report_timeout_error(dev, "reset timeout");
319 case ZMII_MII0: 330 dev->reset_failed = 1;
320 mode = MII; 331 return -ETIMEDOUT;
321 break;
322 case ZMII_RMII0:
323 mode = RMII;
324 break;
325 case ZMII_SMII0:
326 mode = SMII;
327 }
328 } 332 }
333}
329 334
330 /* Set mode to SMII if nothing valid is detected */ 335static void emac_hash_mc(struct ocp_enet_private *dev)
331 if (mode < 0) 336{
332 mode = SMII; 337 struct emac_regs *p = dev->emacp;
338 u16 gaht[4] = { 0 };
339 struct dev_mc_list *dmi;
333 340
334 printk(KERN_NOTICE "zmii%d: input %d in %s mode\n", 341 DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);
335 zmii_dev->def->index, input, mode_name[mode]);
336 342
337 zmii->mode[input] = mode; 343 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
338 zmii->users++; 344 int bit;
345 DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
346 dev->def->index,
347 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
348 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
339 349
340 return 0; 350 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
351 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
352 }
353 out_be32(&p->gaht1, gaht[0]);
354 out_be32(&p->gaht2, gaht[1]);
355 out_be32(&p->gaht3, gaht[2]);
356 out_be32(&p->gaht4, gaht[3]);
341} 357}
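To see the indexing at work, suppose ether_crc() returned 0xb6000000 for some address (a value chosen purely for illustration): 0xb6000000 >> 26 = 45, so bit = 63 - 45 = 18; gaht[18 >> 4] selects gaht[1] (i.e. EMACx_GAHT2), and 0x8000 >> (18 & 0x0f) = 0x2000 is the bit set in it.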
342 358
343static void emac_enable_zmii_port(struct ocp_device *ocpdev, int input) 359static inline u32 emac_iff2rmr(struct net_device *ndev)
344{ 360{
345 u32 mask; 361 u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
346 struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev); 362 EMAC_RMR_BASE;
363
364 if (ndev->flags & IFF_PROMISC)
365 r |= EMAC_RMR_PME;
366 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
367 r |= EMAC_RMR_PMME;
368 else if (ndev->mc_count > 0)
369 r |= EMAC_RMR_MAE;
347 370
348 mask = in_be32(&zmii->base->fer); 371 return r;
349 mask &= zmii_enable[input][MDI]; /* turn all non enabled MDI's off */
350 mask |= zmii_enable[input][zmii->mode[input]] | mdi_enable[input];
351 out_be32(&zmii->base->fer, mask);
352} 372}
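So an interface in promiscuous mode gets the base bits plus EMAC_RMR_PME; one with IFF_ALLMULTI or more than 32 multicast groups gets EMAC_RMR_PMME; and a small multicast set yields EMAC_RMR_MAE, in which case the caller is expected to program the hash filter via emac_hash_mc() above before writing the result to EMACx_RMR, as emac_configure() and emac_set_multicast_list() below both do.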
353 373
354static void 374static inline int emac_opb_mhz(void)
355emac_zmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
356{ 375{
357 struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev); 376 return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
358
359 if (speed == 100)
360 zmii_speed |= zmii_speed100[input];
361 else
362 zmii_speed &= ~zmii_speed100[input];
363
364 out_be32(&zmii->base->ssr, zmii_speed);
365} 377}
366 378
367static void emac_close_zmii(struct ocp_device *ocpdev) 379/* BHs disabled */
380static int emac_configure(struct ocp_enet_private *dev)
368{ 381{
369 struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev); 382 struct emac_regs *p = dev->emacp;
370 BUG_ON(!zmii || zmii->users == 0); 383 struct net_device *ndev = dev->ndev;
384 int gige;
385 u32 r;
371 386
372 if (!--zmii->users) { 387 DBG("%d: configure" NL, dev->def->index);
373 ocp_set_drvdata(ocpdev, NULL);
374 iounmap((void *)zmii->base);
375 kfree(zmii);
376 }
377}
378 388
379int emac_phy_read(struct net_device *dev, int mii_id, int reg) 389 if (emac_reset(dev) < 0)
380{ 390 return -ETIMEDOUT;
381 int count;
382 uint32_t stacr;
383 struct ocp_enet_private *fep = dev->priv;
384 emac_t *emacp = fep->emacp;
385 391
386 MDIO_DEBUG(("%s: phy_read, id: 0x%x, reg: 0x%x\n", dev->name, mii_id, 392 tah_reset(dev->tah_dev);
387 reg));
388 393
389 /* Enable proper ZMII port */ 394 /* Mode register */
390 if (fep->zmii_dev) 395 r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
391 emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input); 396 if (dev->phy.duplex == DUPLEX_FULL)
397 r |= EMAC_MR1_FDE;
398 switch (dev->phy.speed) {
399 case SPEED_1000:
400 if (emac_phy_gpcs(dev->phy.mode)) {
401 r |= EMAC_MR1_MF_1000GPCS |
402 EMAC_MR1_MF_IPPA(dev->phy.address);
392 403
393 /* Use the EMAC that has the MDIO port */ 404 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
394 if (fep->mdio_dev) { 405 * identify this GPCS PHY later.
395 dev = fep->mdio_dev; 406 */
396 fep = dev->priv; 407 out_be32(&p->ipcr, 0xdeadbeef);
397 emacp = fep->emacp; 408 } else
409 r |= EMAC_MR1_MF_1000;
410 r |= EMAC_MR1_RFS_16K;
411 gige = 1;
412
413 if (dev->ndev->mtu > ETH_DATA_LEN)
414 r |= EMAC_MR1_JPSM;
415 break;
416 case SPEED_100:
417 r |= EMAC_MR1_MF_100;
418 /* Fall through */
419 default:
420 r |= EMAC_MR1_RFS_4K;
421 gige = 0;
422 break;
398 } 423 }
399 424
400 count = 0; 425 if (dev->rgmii_dev)
401 while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0) 426 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
402 && (count++ < MDIO_DELAY)) 427 dev->phy.speed);
403 udelay(1); 428 else
404 MDIO_DEBUG((" (count was %d)\n", count)); 429 zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);
405 430
406 if ((stacr & EMAC_STACR_OC) == 0) { 431#if !defined(CONFIG_40x)
407 printk(KERN_WARNING "%s: PHY read timeout #1!\n", dev->name); 432 /* on 40x erratum forces us to NOT use integrated flow control,
408 return -1; 433 * let's hope it works on 44x ;)
434 */
435 if (dev->phy.duplex == DUPLEX_FULL) {
436 if (dev->phy.pause)
437 r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
438 else if (dev->phy.asym_pause)
439 r |= EMAC_MR1_APP;
409 } 440 }
441#endif
442 out_be32(&p->mr1, r);
443
444 /* Set individual MAC address */
445 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
446 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
447 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
448 ndev->dev_addr[5]);
449
450 /* VLAN Tag Protocol ID */
451 out_be32(&p->vtpid, 0x8100);
452
453 /* Receive mode register */
454 r = emac_iff2rmr(ndev);
455 if (r & EMAC_RMR_MAE)
456 emac_hash_mc(dev);
457 out_be32(&p->rmr, r);
458
459 /* FIFOs thresholds */
460 r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
461 EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
462 out_be32(&p->tmr1, r);
463 out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));
464
465 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
 466 there should still be enough space in the FIFO to allow our link
467 partner time to process this frame and also time to send PAUSE
468 frame itself.
469
470 Here is the worst case scenario for the RX FIFO "headroom"
471 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
472
473 1) One maximum-length frame on TX 1522 bytes
474 2) One PAUSE frame time 64 bytes
475 3) PAUSE frame decode time allowance 64 bytes
476 4) One maximum-length frame on RX 1522 bytes
477 5) Round-trip propagation delay of the link (100Mb) 15 bytes
478 ----------
479 3187 bytes
480
 481 I chose to set the high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
 482 and the low-water mark to RX_FIFO_SIZE / 8 (512 bytes).
483 */
484 r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
485 EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
486 out_be32(&p->rwmr, r);
487
488 /* Set PAUSE timer to the maximum */
489 out_be32(&p->ptr, 0xffff);
490
491 /* IRQ sources */
492 out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
493 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
494 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
495 EMAC_ISR_IRE | EMAC_ISR_TE);
496
497 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
498 if (emac_phy_gpcs(dev->phy.mode))
499 mii_reset_phy(&dev->phy);
500
501 return 0;
502}
410 503
411 /* Clear the speed bits and make a read request to the PHY */ 504/* BHs disabled */
412 stacr = ((EMAC_STACR_READ | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ); 505static void emac_reinitialize(struct ocp_enet_private *dev)
413 stacr |= ((mii_id & 0x1F) << 5); 506{
507 DBG("%d: reinitialize" NL, dev->def->index);
414 508
415 out_be32(&emacp->em0stacr, stacr); 509 if (!emac_configure(dev)) {
510 emac_tx_enable(dev);
511 emac_rx_enable(dev);
512 }
513}
416 514
417 count = 0; 515/* BHs disabled */
418 while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0) 516static void emac_full_tx_reset(struct net_device *ndev)
419 && (count++ < MDIO_DELAY)) 517{
420 udelay(1); 518 struct ocp_enet_private *dev = ndev->priv;
421 MDIO_DEBUG((" (count was %d)\n", count)); 519 struct ocp_func_emac_data *emacdata = dev->def->additions;
422 520
423 if ((stacr & EMAC_STACR_OC) == 0) { 521 DBG("%d: full_tx_reset" NL, dev->def->index);
424 printk(KERN_WARNING "%s: PHY read timeout #2!\n", dev->name);
425 return -1;
426 }
427 522
428 /* Check for a read error */ 523 emac_tx_disable(dev);
429 if (stacr & EMAC_STACR_PHYE) { 524 mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
430 MDIO_DEBUG(("EMAC MDIO PHY error !\n")); 525 emac_clean_tx_ring(dev);
431 return -1; 526 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
432 } 527
528 emac_configure(dev);
433 529
434 MDIO_DEBUG((" -> 0x%x\n", stacr >> 16)); 530 mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
531 emac_tx_enable(dev);
532 emac_rx_enable(dev);
435 533
436 return (stacr >> 16); 534 netif_wake_queue(ndev);
437} 535}
438 536
439void emac_phy_write(struct net_device *dev, int mii_id, int reg, int data) 537static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
440{ 538{
441 int count; 539 struct emac_regs *p = dev->emacp;
442 uint32_t stacr; 540 u32 r;
443 struct ocp_enet_private *fep = dev->priv; 541 int n;
444 emac_t *emacp = fep->emacp;
445 542
446 MDIO_DEBUG(("%s phy_write, id: 0x%x, reg: 0x%x, data: 0x%x\n", 543 DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);
447 dev->name, mii_id, reg, data));
448 544
449 /* Enable proper ZMII port */ 545 /* Enable proper MDIO port */
450 if (fep->zmii_dev) 546 zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
451 emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);
452 547
453 /* Use the EMAC that has the MDIO port */ 548 /* Wait for management interface to become idle */
454 if (fep->mdio_dev) { 549 n = 10;
455 dev = fep->mdio_dev; 550 while (!emac_phy_done(in_be32(&p->stacr))) {
456 fep = dev->priv; 551 udelay(1);
457 emacp = fep->emacp; 552 if (!--n)
553 goto to;
458 } 554 }
459 555
460 count = 0; 556 /* Issue read command */
461 while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0) 557 out_be32(&p->stacr,
462 && (count++ < MDIO_DELAY)) 558 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
559 (reg & EMAC_STACR_PRA_MASK)
560 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
561 | EMAC_STACR_START);
562
563 /* Wait for read to complete */
564 n = 100;
565 while (!emac_phy_done(r = in_be32(&p->stacr))) {
463 udelay(1); 566 udelay(1);
464 MDIO_DEBUG((" (count was %d)\n", count)); 567 if (!--n)
568 goto to;
569 }
465 570
466 if ((stacr & EMAC_STACR_OC) == 0) { 571 if (unlikely(r & EMAC_STACR_PHYE)) {
467 printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name); 572 DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
468 return; 573 id, reg);
574 return -EREMOTEIO;
469 } 575 }
470 576
471 /* Clear the speed bits and make a read request to the PHY */ 577 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
578 DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
579 return r;
580 to:
581 DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
582 return -ETIMEDOUT;
583}
472 584
473 stacr = ((EMAC_STACR_WRITE | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ); 585static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
474 stacr |= ((mii_id & 0x1f) << 5) | ((data & 0xffff) << 16); 586 u16 val)
587{
588 struct emac_regs *p = dev->emacp;
589 int n;
590
591 DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
592 val);
475 593
476 out_be32(&emacp->em0stacr, stacr); 594 /* Enable proper MDIO port */
595 zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
477 596
478 count = 0; 597 /* Wait for management interface to be idle */
479 while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0) 598 n = 10;
480 && (count++ < MDIO_DELAY)) 599 while (!emac_phy_done(in_be32(&p->stacr))) {
481 udelay(1); 600 udelay(1);
482 MDIO_DEBUG((" (count was %d)\n", count)); 601 if (!--n)
602 goto to;
603 }
483 604
484 if ((stacr & EMAC_STACR_OC) == 0) 605 /* Issue write command */
485 printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name); 606 out_be32(&p->stacr,
607 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
608 (reg & EMAC_STACR_PRA_MASK) |
609 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
610 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);
486 611
487 /* Check for a write error */ 612 /* Wait for write to complete */
488 if ((stacr & EMAC_STACR_PHYE) != 0) { 613 n = 100;
489 MDIO_DEBUG(("EMAC MDIO PHY error !\n")); 614 while (!emac_phy_done(in_be32(&p->stacr))) {
615 udelay(1);
616 if (!--n)
617 goto to;
490 } 618 }
619 return;
620 to:
621 DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
491} 622}
492 623
493static void emac_txeob_dev(void *param, u32 chanmask) 624static int emac_mdio_read(struct net_device *ndev, int id, int reg)
494{ 625{
495 struct net_device *dev = param; 626 struct ocp_enet_private *dev = ndev->priv;
496 struct ocp_enet_private *fep = dev->priv; 627 int res;
497 unsigned long flags; 628
498 629 local_bh_disable();
499 spin_lock_irqsave(&fep->lock, flags); 630 res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
500 631 (u8) reg);
501 PKT_DEBUG(("emac_txeob_dev() entry, tx_cnt: %d\n", fep->tx_cnt)); 632 local_bh_enable();
502 633 return res;
503 while (fep->tx_cnt && 634}
504 !(fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_READY)) {
505 635
506 if (fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_LAST) { 636static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
507 /* Tell the system the transmit completed. */ 637{
508 dma_unmap_single(&fep->ocpdev->dev, 638 struct ocp_enet_private *dev = ndev->priv;
509 fep->tx_desc[fep->ack_slot].data_ptr,
510 fep->tx_desc[fep->ack_slot].data_len,
511 DMA_TO_DEVICE);
512 dev_kfree_skb_irq(fep->tx_skb[fep->ack_slot]);
513 639
514 if (fep->tx_desc[fep->ack_slot].ctrl & 640 local_bh_disable();
515 (EMAC_TX_ST_EC | EMAC_TX_ST_MC | EMAC_TX_ST_SC)) 641 __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
516 fep->stats.collisions++; 642 (u8) reg, (u16) val);
517 } 643 local_bh_enable();
644}
518 645
519 fep->tx_skb[fep->ack_slot] = (struct sk_buff *)NULL; 646/* BHs disabled */
520 if (++fep->ack_slot == NUM_TX_BUFF) 647static void emac_set_multicast_list(struct net_device *ndev)
521 fep->ack_slot = 0; 648{
649 struct ocp_enet_private *dev = ndev->priv;
650 struct emac_regs *p = dev->emacp;
651 u32 rmr = emac_iff2rmr(ndev);
652
653 DBG("%d: multicast %08x" NL, dev->def->index, rmr);
654 BUG_ON(!netif_running(dev->ndev));
655
656 /* I decided to relax register access rules here to avoid
657 * full EMAC reset.
658 *
659 * There is a real problem with EMAC4 core if we use MWSW_001 bit
660 * in MR1 register and do a full EMAC reset.
661 * One TX BD status update is delayed and, after EMAC reset, it
 662 * never happens, resulting in a TX hang (it'll be recovered by the TX
663 * timeout handler eventually, but this is just gross).
664 * So we either have to do full TX reset or try to cheat here :)
665 *
666 * The only required change is to RX mode register, so I *think* all
667 * we need is just to stop RX channel. This seems to work on all
668 * tested SoCs. --ebs
669 */
670 emac_rx_disable(dev);
671 if (rmr & EMAC_RMR_MAE)
672 emac_hash_mc(dev);
673 out_be32(&p->rmr, rmr);
674 emac_rx_enable(dev);
675}
522 676
523 fep->tx_cnt--; 677/* BHs disabled */
678static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
679{
680 struct ocp_func_emac_data *emacdata = dev->def->additions;
681 int rx_sync_size = emac_rx_sync_size(new_mtu);
682 int rx_skb_size = emac_rx_skb_size(new_mtu);
683 int i, ret = 0;
684
685 emac_rx_disable(dev);
686 mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
687
688 if (dev->rx_sg_skb) {
689 ++dev->estats.rx_dropped_resize;
690 dev_kfree_skb(dev->rx_sg_skb);
691 dev->rx_sg_skb = NULL;
524 } 692 }
525 if (fep->tx_cnt < NUM_TX_BUFF)
526 netif_wake_queue(dev);
527 693
528 PKT_DEBUG(("emac_txeob_dev() exit, tx_cnt: %d\n", fep->tx_cnt)); 694 /* Make a first pass over RX ring and mark BDs ready, dropping
695 * non-processed packets on the way. We need this as a separate pass
696 * to simplify error recovery in the case of allocation failure later.
697 */
698 for (i = 0; i < NUM_RX_BUFF; ++i) {
699 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
700 ++dev->estats.rx_dropped_resize;
529 701
530 spin_unlock_irqrestore(&fep->lock, flags); 702 dev->rx_desc[i].data_len = 0;
531} 703 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
704 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
705 }
532 706
533/* 707 /* Reallocate RX ring only if bigger skb buffers are required */
534 Fill/Re-fill the rx chain with valid ctrl/ptrs. 708 if (rx_skb_size <= dev->rx_skb_size)
535 This function will fill from rx_slot up to the parm end. 709 goto skip;
536 So to completely fill the chain pre-set rx_slot to 0 and
537 pass in an end of 0.
538 */
539static void emac_rx_fill(struct net_device *dev, int end)
540{
541 int i;
542 struct ocp_enet_private *fep = dev->priv;
543
544 i = fep->rx_slot;
545 do {
546 /* We don't want the 16 bytes skb_reserve done by dev_alloc_skb,
 547 * it breaks our cache line alignment. However, we still allocate
548 * +16 so that we end up allocating the exact same size as
549 * dev_alloc_skb() would do.
550 * Also, because of the skb_res, the max DMA size we give to EMAC
 551 * is slightly wrong, causing it to potentially DMA 2 more bytes
 552 * from a broken/oversized packet. These 16 bytes will take care
 553 * that we don't walk on somebody else's toes with that.
554 */
555 fep->rx_skb[i] =
556 alloc_skb(fep->rx_buffer_size + 16, GFP_ATOMIC);
557
558 if (fep->rx_skb[i] == NULL) {
559 /* Keep rx_slot here, the next time clean/fill is called
560 * we will try again before the MAL wraps back here
561 * If the MAL tries to use this descriptor with
562 * the EMPTY bit off it will cause the
563 * rxde interrupt. That is where we will
564 * try again to allocate an sk_buff.
565 */
566 break;
567 710
711 /* Second pass, allocate new skbs */
712 for (i = 0; i < NUM_RX_BUFF; ++i) {
713 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
714 if (!skb) {
715 ret = -ENOMEM;
716 goto oom;
568 } 717 }
569 718
570 if (skb_res) 719 BUG_ON(!dev->rx_skb[i]);
571 skb_reserve(fep->rx_skb[i], skb_res); 720 dev_kfree_skb(dev->rx_skb[i]);
572 721
573 /* We must NOT dma_map_single the cache line right after the 722 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
574 * buffer, so we must crop our sync size to account for the 723 dev->rx_desc[i].data_ptr =
575 * reserved space 724 dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
576 */ 725 DMA_FROM_DEVICE) + 2;
577 fep->rx_desc[i].data_ptr = 726 dev->rx_skb[i] = skb;
578 (unsigned char *)dma_map_single(&fep->ocpdev->dev, 727 }
579 (void *)fep->rx_skb[i]-> 728 skip:
580 data, 729 /* Check if we need to change "Jumbo" bit in MR1 */
581 fep->rx_buffer_size - 730 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
582 skb_res, DMA_FROM_DEVICE); 731 /* This is to prevent starting RX channel in emac_rx_enable() */
583 732 dev->commac.rx_stopped = 1;
584 /* 733
585 * Some 4xx implementations use the previously 734 dev->ndev->mtu = new_mtu;
586 * reserved bits in data_len to encode the MS 735 emac_full_tx_reset(dev->ndev);
587 * 4-bits of a 36-bit physical address (ERPN) 736 }
588 * This must be initialized.
589 */
590 fep->rx_desc[i].data_len = 0;
591 fep->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR |
592 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
593 737
594 } while ((i = (i + 1) % NUM_RX_BUFF) != end); 738 mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
739 oom:
740 /* Restart RX */
741 dev->commac.rx_stopped = dev->rx_slot = 0;
742 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
743 emac_rx_enable(dev);
595 744
596 fep->rx_slot = i; 745 return ret;
597} 746}
598 747
599static void 748/* Process ctx, rtnl_lock semaphore */
600emac_rx_csum(struct net_device *dev, unsigned short ctrl, struct sk_buff *skb) 749static int emac_change_mtu(struct net_device *ndev, int new_mtu)
601{ 750{
602 struct ocp_enet_private *fep = dev->priv; 751 struct ocp_enet_private *dev = ndev->priv;
752 int ret = 0;
603 753
604 /* Exit if interface has no TAH engine */ 754 if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
605 if (!fep->tah_dev) { 755 return -EINVAL;
606 skb->ip_summed = CHECKSUM_NONE;
607 return;
608 }
609 756
610 /* Check for TCP/UDP/IP csum error */ 757 DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);
611 if (ctrl & EMAC_CSUM_VER_ERROR) {
612 /* Let the stack verify checksum errors */
613 skb->ip_summed = CHECKSUM_NONE;
614/* adapter->hw_csum_err++; */
615 } else {
616 /* Csum is good */
617 skb->ip_summed = CHECKSUM_UNNECESSARY;
618/* adapter->hw_csum_good++; */
619 }
620}
621 758
622static int emac_rx_clean(struct net_device *dev) 759 local_bh_disable();
623{ 760 if (netif_running(ndev)) {
 624 int i, b, bnum = 0, buf[6]; 761 /* Check if we really need to reinitialize RX ring */
625 int error, frame_length; 762 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
626 struct ocp_enet_private *fep = dev->priv; 763 ret = emac_resize_rx_ring(dev, new_mtu);
627 unsigned short ctrl; 764 }
628 765
629 i = fep->rx_slot; 766 if (!ret) {
767 ndev->mtu = new_mtu;
768 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
769 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
770 }
771 local_bh_enable();
630 772
631 PKT_DEBUG(("emac_rx_clean() entry, rx_slot: %d\n", fep->rx_slot)); 773 return ret;
774}
632 775
633 do { 776static void emac_clean_tx_ring(struct ocp_enet_private *dev)
634 if (fep->rx_skb[i] == NULL) 777{
 635 continue; /*we have already handled the packet but have failed to alloc */ 778 int i;
636 /* 779 for (i = 0; i < NUM_TX_BUFF; ++i) {
637 since rx_desc is in uncached mem we don't keep reading it directly 780 if (dev->tx_skb[i]) {
638 we pull out a local copy of ctrl and do the checks on the copy. 781 dev_kfree_skb(dev->tx_skb[i]);
639 */ 782 dev->tx_skb[i] = NULL;
640 ctrl = fep->rx_desc[i].ctrl; 783 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
641 if (ctrl & MAL_RX_CTRL_EMPTY) 784 ++dev->estats.tx_dropped;
642 break; /*we don't have any more ready packets */
643
644 if (EMAC_IS_BAD_RX_PACKET(ctrl)) {
645 fep->stats.rx_errors++;
646 fep->stats.rx_dropped++;
647
648 if (ctrl & EMAC_RX_ST_OE)
649 fep->stats.rx_fifo_errors++;
650 if (ctrl & EMAC_RX_ST_AE)
651 fep->stats.rx_frame_errors++;
652 if (ctrl & EMAC_RX_ST_BFCS)
653 fep->stats.rx_crc_errors++;
654 if (ctrl & (EMAC_RX_ST_RP | EMAC_RX_ST_PTL |
655 EMAC_RX_ST_ORE | EMAC_RX_ST_IRE))
656 fep->stats.rx_length_errors++;
657 } else {
658 if ((ctrl & (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) ==
659 (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) {
660 /* Single descriptor packet */
661 emac_rx_csum(dev, ctrl, fep->rx_skb[i]);
662 /* Send the skb up the chain. */
663 frame_length = fep->rx_desc[i].data_len - 4;
664 skb_put(fep->rx_skb[i], frame_length);
665 fep->rx_skb[i]->dev = dev;
666 fep->rx_skb[i]->protocol =
667 eth_type_trans(fep->rx_skb[i], dev);
668 error = netif_rx(fep->rx_skb[i]);
669
670 if ((error == NET_RX_DROP) ||
671 (error == NET_RX_BAD)) {
672 fep->stats.rx_dropped++;
673 } else {
674 fep->stats.rx_packets++;
675 fep->stats.rx_bytes += frame_length;
676 }
677 fep->rx_skb[i] = NULL;
678 } else {
679 /* Multiple descriptor packet */
680 if (ctrl & MAL_RX_CTRL_FIRST) {
681 if (fep->rx_desc[(i + 1) % NUM_RX_BUFF].
682 ctrl & MAL_RX_CTRL_EMPTY)
683 break;
684 bnum = 0;
685 buf[bnum] = i;
686 ++bnum;
687 continue;
688 }
689 if (((ctrl & MAL_RX_CTRL_FIRST) !=
690 MAL_RX_CTRL_FIRST) &&
691 ((ctrl & MAL_RX_CTRL_LAST) !=
692 MAL_RX_CTRL_LAST)) {
693 if (fep->rx_desc[(i + 1) %
694 NUM_RX_BUFF].ctrl &
695 MAL_RX_CTRL_EMPTY) {
696 i = buf[0];
697 break;
698 }
699 buf[bnum] = i;
700 ++bnum;
701 continue;
702 }
703 if (ctrl & MAL_RX_CTRL_LAST) {
704 buf[bnum] = i;
705 ++bnum;
706 skb_put(fep->rx_skb[buf[0]],
707 fep->rx_desc[buf[0]].data_len);
708 for (b = 1; b < bnum; b++) {
709 /*
710 * MAL is braindead, we need
711 * to copy the remainder
712 * of the packet from the
713 * latter descriptor buffers
714 * to the first skb. Then
715 * dispose of the source
716 * skbs.
717 *
718 * Once the stack is fixed
719 * to handle frags on most
720 * protocols we can generate
721 * a fragmented skb with
722 * no copies.
723 */
724 memcpy(fep->rx_skb[buf[0]]->
725 data +
726 fep->rx_skb[buf[0]]->len,
727 fep->rx_skb[buf[b]]->
728 data,
729 fep->rx_desc[buf[b]].
730 data_len);
731 skb_put(fep->rx_skb[buf[0]],
732 fep->rx_desc[buf[b]].
733 data_len);
734 dma_unmap_single(&fep->ocpdev->
735 dev,
736 fep->
737 rx_desc[buf
738 [b]].
739 data_ptr,
740 fep->
741 rx_desc[buf
742 [b]].
743 data_len,
744 DMA_FROM_DEVICE);
745 dev_kfree_skb(fep->
746 rx_skb[buf[b]]);
747 }
748 emac_rx_csum(dev, ctrl,
749 fep->rx_skb[buf[0]]);
750
751 fep->rx_skb[buf[0]]->dev = dev;
752 fep->rx_skb[buf[0]]->protocol =
753 eth_type_trans(fep->rx_skb[buf[0]],
754 dev);
755 error = netif_rx(fep->rx_skb[buf[0]]);
756
757 if ((error == NET_RX_DROP)
758 || (error == NET_RX_BAD)) {
759 fep->stats.rx_dropped++;
760 } else {
761 fep->stats.rx_packets++;
762 fep->stats.rx_bytes +=
763 fep->rx_skb[buf[0]]->len;
764 }
765 for (b = 0; b < bnum; b++)
766 fep->rx_skb[buf[b]] = NULL;
767 }
768 }
769 } 785 }
770 } while ((i = (i + 1) % NUM_RX_BUFF) != fep->rx_slot); 786 dev->tx_desc[i].ctrl = 0;
771 787 dev->tx_desc[i].data_ptr = 0;
772 PKT_DEBUG(("emac_rx_clean() exit, rx_slot: %d\n", fep->rx_slot)); 788 }
773
774 return i;
775} 789}
776 790
777static void emac_rxeob_dev(void *param, u32 chanmask) 791static void emac_clean_rx_ring(struct ocp_enet_private *dev)
778{ 792{
779 struct net_device *dev = param; 793 int i;
780 struct ocp_enet_private *fep = dev->priv; 794 for (i = 0; i < NUM_RX_BUFF; ++i)
781 unsigned long flags; 795 if (dev->rx_skb[i]) {
782 int n; 796 dev->rx_desc[i].ctrl = 0;
797 dev_kfree_skb(dev->rx_skb[i]);
798 dev->rx_skb[i] = NULL;
799 dev->rx_desc[i].data_ptr = 0;
800 }
783 801
784 spin_lock_irqsave(&fep->lock, flags); 802 if (dev->rx_sg_skb) {
785 if ((n = emac_rx_clean(dev)) != fep->rx_slot) 803 dev_kfree_skb(dev->rx_sg_skb);
786 emac_rx_fill(dev, n); 804 dev->rx_sg_skb = NULL;
787 spin_unlock_irqrestore(&fep->lock, flags); 805 }
788} 806}
789 807
790/* 808static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
791 * This interrupt should never occurr, we don't program 809 int flags)
792 * the MAL for contiunous mode.
793 */
794static void emac_txde_dev(void *param, u32 chanmask)
795{ 810{
796 struct net_device *dev = param; 811 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
797 struct ocp_enet_private *fep = dev->priv; 812 if (unlikely(!skb))
813 return -ENOMEM;
798 814
799 printk(KERN_WARNING "%s: transmit descriptor error\n", dev->name); 815 dev->rx_skb[slot] = skb;
816 dev->rx_desc[slot].data_len = 0;
800 817
801 emac_mac_dump(dev); 818 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
802 emac_mal_dump(dev); 819 dev->rx_desc[slot].data_ptr =
820 dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
821 DMA_FROM_DEVICE) + 2;
822 barrier();
823 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
824 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
803 825
804 /* Reenable the transmit channel */ 826 return 0;
805 mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
806} 827}
807 828
808/* 829static void emac_print_link_status(struct ocp_enet_private *dev)
809 * This interrupt should be very rare at best. This occurs when
810 * the hardware has a problem with the receive descriptors. The manual
 811 * states that it occurs when the hardware finds a receive descriptor
 812 * whose empty bit is not set. The recovery mechanism will be to
813 * traverse through the descriptors, handle any that are marked to be
814 * handled and reinitialize each along the way. At that point the driver
815 * will be restarted.
816 */
817static void emac_rxde_dev(void *param, u32 chanmask)
818{ 830{
819 struct net_device *dev = param; 831 if (netif_carrier_ok(dev->ndev))
820 struct ocp_enet_private *fep = dev->priv; 832 printk(KERN_INFO "%s: link is up, %d %s%s\n",
821 unsigned long flags; 833 dev->ndev->name, dev->phy.speed,
822 834 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
823 if (net_ratelimit()) { 835 dev->phy.pause ? ", pause enabled" :
824 printk(KERN_WARNING "%s: receive descriptor error\n", 836 dev->phy.asym_pause ? ", assymetric pause enabled" : "");
825 fep->ndev->name); 837 else
838 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
839}
826 840
827 emac_mac_dump(dev); 841/* Process ctx, rtnl_lock semaphore */
828 emac_mal_dump(dev); 842static int emac_open(struct net_device *ndev)
829 emac_desc_dump(dev); 843{
844 struct ocp_enet_private *dev = ndev->priv;
845 struct ocp_func_emac_data *emacdata = dev->def->additions;
846 int err, i;
847
848 DBG("%d: open" NL, dev->def->index);
849
850 /* Setup error IRQ handler */
851 err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
852 if (err) {
853 printk(KERN_ERR "%s: failed to request IRQ %d\n",
854 ndev->name, dev->def->irq);
855 return err;
830 } 856 }
831 857
832 /* Disable RX channel */ 858 /* Allocate RX ring */
833 spin_lock_irqsave(&fep->lock, flags); 859 for (i = 0; i < NUM_RX_BUFF; ++i)
834 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 860 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
835 861 printk(KERN_ERR "%s: failed to allocate RX ring\n",
836 /* For now, charge the error against all emacs */ 862 ndev->name);
837 fep->stats.rx_errors++; 863 goto oom;
838 864 }
839 /* so do we have any good packets still? */
840 emac_rx_clean(dev);
841
842 /* When the interface is restarted it resets processing to the
843 * first descriptor in the table.
844 */
845
846 fep->rx_slot = 0;
847 emac_rx_fill(dev, 0);
848 865
849 set_mal_dcrn(fep->mal, DCRN_MALRXEOBISR, fep->commac.rx_chan_mask); 866 local_bh_disable();
850 set_mal_dcrn(fep->mal, DCRN_MALRXDEIR, fep->commac.rx_chan_mask); 867 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
868 dev->commac.rx_stopped = 0;
869 dev->rx_sg_skb = NULL;
870
871 if (dev->phy.address >= 0) {
872 int link_poll_interval;
873 if (dev->phy.def->ops->poll_link(&dev->phy)) {
874 dev->phy.def->ops->read_link(&dev->phy);
875 EMAC_RX_CLK_DEFAULT(dev->def->index);
876 netif_carrier_on(dev->ndev);
877 link_poll_interval = PHY_POLL_LINK_ON;
878 } else {
879 EMAC_RX_CLK_TX(dev->def->index);
880 netif_carrier_off(dev->ndev);
881 link_poll_interval = PHY_POLL_LINK_OFF;
882 }
883 mod_timer(&dev->link_timer, jiffies + link_poll_interval);
884 emac_print_link_status(dev);
885 } else
886 netif_carrier_on(dev->ndev);
887
888 emac_configure(dev);
889 mal_poll_add(dev->mal, &dev->commac);
890 mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
891 mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
892 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
893 emac_tx_enable(dev);
894 emac_rx_enable(dev);
895 netif_start_queue(ndev);
896 local_bh_enable();
851 897
852 /* Reenable the receive channels */ 898 return 0;
853 mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 899 oom:
854 spin_unlock_irqrestore(&fep->lock, flags); 900 emac_clean_rx_ring(dev);
901 free_irq(dev->def->irq, dev);
902 return -ENOMEM;
855} 903}
856 904
857static irqreturn_t 905/* BHs disabled */
858emac_mac_irq(int irq, void *dev_instance, struct pt_regs *regs) 906static int emac_link_differs(struct ocp_enet_private *dev)
859{ 907{
860 struct net_device *dev = dev_instance; 908 u32 r = in_be32(&dev->emacp->mr1);
861 struct ocp_enet_private *fep = dev->priv;
862 emac_t *emacp = fep->emacp;
863 unsigned long tmp_em0isr;
864 909
865 /* EMAC interrupt */ 910 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
866 tmp_em0isr = in_be32(&emacp->em0isr); 911 int speed, pause, asym_pause;
867 if (tmp_em0isr & (EMAC_ISR_TE0 | EMAC_ISR_TE1)) {
868 /* This error is a hard transmit error - could retransmit */
869 fep->stats.tx_errors++;
870 912
871 /* Reenable the transmit channel */ 913 if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
872 mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask); 914 speed = SPEED_1000;
915 else if (r & EMAC_MR1_MF_100)
916 speed = SPEED_100;
917 else
918 speed = SPEED_10;
873 919
874 } else { 920 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
875 fep->stats.rx_errors++; 921 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
922 pause = 1;
923 asym_pause = 0;
924 break;
925 case EMAC_MR1_APP:
926 pause = 0;
927 asym_pause = 1;
928 break;
929 default:
930 pause = asym_pause = 0;
876 } 931 }
877 932 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
878 if (tmp_em0isr & EMAC_ISR_RP) 933 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
879 fep->stats.rx_length_errors++;
880 if (tmp_em0isr & EMAC_ISR_ALE)
881 fep->stats.rx_frame_errors++;
882 if (tmp_em0isr & EMAC_ISR_BFCS)
883 fep->stats.rx_crc_errors++;
884 if (tmp_em0isr & EMAC_ISR_PTLE)
885 fep->stats.rx_length_errors++;
886 if (tmp_em0isr & EMAC_ISR_ORE)
887 fep->stats.rx_length_errors++;
888 if (tmp_em0isr & EMAC_ISR_TE0)
889 fep->stats.tx_aborted_errors++;
890
891 emac_err_dump(dev, tmp_em0isr);
892
893 out_be32(&emacp->em0isr, tmp_em0isr);
894
895 return IRQ_HANDLED;
896} 934}
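
For readers tracing the new emac_link_differs() above: it re-derives speed, duplex and pause settings purely from the MR1 register image and compares them against the cached PHY state. A self-contained sketch of the same decode; the MR1_* bit positions below are illustrative stand-ins, not the real EMAC_MR1_* values from ibm_emac.h:

	#include <stdio.h>

	/* Illustrative stand-ins for the EMAC_MR1_* masks; the real values
	 * live in ibm_emac.h and may differ. */
	#define MR1_FDE      0x80000000u	/* full duplex */
	#define MR1_MF_100   0x00400000u	/* 100 Mb/s */
	#define MR1_MF_1000  0x00800000u	/* 1000 Mb/s */
	#define MR1_EIFC     0x10000000u	/* flow control enable */
	#define MR1_APP      0x08000000u	/* asymmetric pause */

	static void decode_mr1(unsigned int r)
	{
		int speed = (r & MR1_MF_1000) ? 1000 : (r & MR1_MF_100) ? 100 : 10;
		int duplex = !!(r & MR1_FDE);
		/* Both bits set: symmetric pause; APP alone: asymmetric pause */
		int pause = (r & (MR1_EIFC | MR1_APP)) == (MR1_EIFC | MR1_APP);
		int asym = !pause && !!(r & MR1_APP);

		printf("%d Mb/s, %s duplex, pause=%d asym=%d\n",
		       speed, duplex ? "full" : "half", pause, asym);
	}

	int main(void)
	{
		decode_mr1(MR1_FDE | MR1_MF_100 | MR1_EIFC | MR1_APP);
		return 0;
	}

Note that an EIFC-only image decodes to no pause at all, matching the default arm of the switch above.
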
897 935
898static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) 936/* BHs disabled */
937static void emac_link_timer(unsigned long data)
899{ 938{
900 unsigned short ctrl; 939 struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
901 unsigned long flags; 940 int link_poll_interval;
902 struct ocp_enet_private *fep = dev->priv;
903 emac_t *emacp = fep->emacp;
904 int len = skb->len;
905 unsigned int offset = 0, size, f, tx_slot_first;
906 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
907 941
908 spin_lock_irqsave(&fep->lock, flags); 942 DBG2("%d: link timer" NL, dev->def->index);
909 943
910 len -= skb->data_len; 944 if (dev->phy.def->ops->poll_link(&dev->phy)) {
945 if (!netif_carrier_ok(dev->ndev)) {
946 EMAC_RX_CLK_DEFAULT(dev->def->index);
911 947
912 if ((fep->tx_cnt + nr_frags + len / DESC_BUF_SIZE + 1) > NUM_TX_BUFF) { 948 /* Get new link parameters */
913 PKT_DEBUG(("emac_start_xmit() stopping queue\n")); 949 dev->phy.def->ops->read_link(&dev->phy);
914 netif_stop_queue(dev);
915 spin_unlock_irqrestore(&fep->lock, flags);
916 return -EBUSY;
917 }
918
919 tx_slot_first = fep->tx_slot;
920 950
921 while (len) { 951 if (dev->tah_dev || emac_link_differs(dev))
922 size = min(len, DESC_BUF_SIZE); 952 emac_full_tx_reset(dev->ndev);
923 953
924 fep->tx_desc[fep->tx_slot].data_len = (short)size; 954 netif_carrier_on(dev->ndev);
925 fep->tx_desc[fep->tx_slot].data_ptr = 955 emac_print_link_status(dev);
926 (unsigned char *)dma_map_single(&fep->ocpdev->dev, 956 }
927 (void *)((unsigned int)skb-> 957 link_poll_interval = PHY_POLL_LINK_ON;
928 data + offset), 958 } else {
929 size, DMA_TO_DEVICE); 959 if (netif_carrier_ok(dev->ndev)) {
930 960 EMAC_RX_CLK_TX(dev->def->index);
931 ctrl = EMAC_TX_CTRL_DFLT; 961#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
932 if (fep->tx_slot != tx_slot_first) 962 emac_reinitialize(dev);
933 ctrl |= MAL_TX_CTRL_READY; 963#endif
934 if ((NUM_TX_BUFF - 1) == fep->tx_slot) 964 netif_carrier_off(dev->ndev);
935 ctrl |= MAL_TX_CTRL_WRAP; 965 emac_print_link_status(dev);
936 if (!nr_frags && (len == size)) {
937 ctrl |= MAL_TX_CTRL_LAST;
938 fep->tx_skb[fep->tx_slot] = skb;
939 } 966 }
940 if (skb->ip_summed == CHECKSUM_HW)
941 ctrl |= EMAC_TX_CTRL_TAH_CSUM;
942 967
943 fep->tx_desc[fep->tx_slot].ctrl = ctrl; 968 /* Retry reset if the previous attempt failed.
969 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
970 * case, but I left it here because it shouldn't trigger for
971 * sane PHYs anyway.
972 */
973 if (unlikely(dev->reset_failed))
974 emac_reinitialize(dev);
944 975
945 len -= size; 976 link_poll_interval = PHY_POLL_LINK_OFF;
946 offset += size; 977 }
978 mod_timer(&dev->link_timer, jiffies + link_poll_interval);
979}
947 980
948 /* Bump tx count */ 981/* BHs disabled */
949 if (++fep->tx_cnt == NUM_TX_BUFF) 982static void emac_force_link_update(struct ocp_enet_private *dev)
950 netif_stop_queue(dev); 983{
984 netif_carrier_off(dev->ndev);
985 if (timer_pending(&dev->link_timer))
986 mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
987}
951 988
952 /* Next descriptor */ 989/* Process ctx, rtnl_lock semaphore */
953 if (++fep->tx_slot == NUM_TX_BUFF) 990static int emac_close(struct net_device *ndev)
954 fep->tx_slot = 0; 991{
955 } 992 struct ocp_enet_private *dev = ndev->priv;
993 struct ocp_func_emac_data *emacdata = dev->def->additions;
956 994
957 for (f = 0; f < nr_frags; f++) { 995 DBG("%d: close" NL, dev->def->index);
958 struct skb_frag_struct *frag;
959 996
960 frag = &skb_shinfo(skb)->frags[f]; 997 local_bh_disable();
961 len = frag->size;
962 offset = 0;
963
964 while (len) {
965 size = min(len, DESC_BUF_SIZE);
966
967 dma_map_page(&fep->ocpdev->dev,
968 frag->page,
969 frag->page_offset + offset,
970 size, DMA_TO_DEVICE);
971
972 ctrl = EMAC_TX_CTRL_DFLT | MAL_TX_CTRL_READY;
973 if ((NUM_TX_BUFF - 1) == fep->tx_slot)
974 ctrl |= MAL_TX_CTRL_WRAP;
975 if ((f == (nr_frags - 1)) && (len == size)) {
976 ctrl |= MAL_TX_CTRL_LAST;
977 fep->tx_skb[fep->tx_slot] = skb;
978 }
979 998
980 if (skb->ip_summed == CHECKSUM_HW) 999 if (dev->phy.address >= 0)
981 ctrl |= EMAC_TX_CTRL_TAH_CSUM; 1000 del_timer_sync(&dev->link_timer);
982 1001
983 fep->tx_desc[fep->tx_slot].data_len = (short)size; 1002 netif_stop_queue(ndev);
984 fep->tx_desc[fep->tx_slot].data_ptr = 1003 emac_rx_disable(dev);
985 (char *)((page_to_pfn(frag->page) << PAGE_SHIFT) + 1004 emac_tx_disable(dev);
986 frag->page_offset + offset); 1005 mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
987 fep->tx_desc[fep->tx_slot].ctrl = ctrl; 1006 mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
1007 mal_poll_del(dev->mal, &dev->commac);
1008 local_bh_enable();
988 1009
989 len -= size; 1010 emac_clean_tx_ring(dev);
990 offset += size; 1011 emac_clean_rx_ring(dev);
1012 free_irq(dev->def->irq, dev);
991 1013
992 /* Bump tx count */ 1014 return 0;
993 if (++fep->tx_cnt == NUM_TX_BUFF) 1015}
994 netif_stop_queue(dev);
995 1016
996 /* Next descriptor */ 1017static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
997 if (++fep->tx_slot == NUM_TX_BUFF) 1018 struct sk_buff *skb)
998 fep->tx_slot = 0; 1019{
999 } 1020#if defined(CONFIG_IBM_EMAC_TAH)
1021 if (skb->ip_summed == CHECKSUM_HW) {
1022 ++dev->stats.tx_packets_csum;
1023 return EMAC_TX_CTRL_TAH_CSUM;
1000 } 1024 }
1025#endif
1026 return 0;
1027}
1001 1028
1002 /* 1029static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
1003 * Deferred set READY on first descriptor of packet to 1030{
1004 * avoid TX MAL race. 1031 struct emac_regs *p = dev->emacp;
1005 */ 1032 struct net_device *ndev = dev->ndev;
1006 fep->tx_desc[tx_slot_first].ctrl |= MAL_TX_CTRL_READY;
1007
1008 /* Send the packet out. */
1009 out_be32(&emacp->em0tmr0, EMAC_TMR0_XMIT);
1010 1033
1011 fep->stats.tx_packets++; 1034 /* Send the packet out */
1012 fep->stats.tx_bytes += skb->len; 1035 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1013 1036
1014 PKT_DEBUG(("emac_start_xmit() exit\n")); 1037 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1038 netif_stop_queue(ndev);
1039 DBG2("%d: stopped TX queue" NL, dev->def->index);
1040 }
1015 1041
1016 spin_unlock_irqrestore(&fep->lock, flags); 1042 ndev->trans_start = jiffies;
1043 ++dev->stats.tx_packets;
1044 dev->stats.tx_bytes += len;
1017 1045
1018 return 0; 1046 return 0;
1019} 1047}
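
Both the old xmit path (left column) and the new emac_start_xmit()/emac_xmit_finish() pair share one invariant: a fixed-size descriptor ring where the producer advances a slot index, sets a WRAP bit on the last slot so the hardware returns to slot 0, and hands over ownership last. A stripped-down sketch of that bookkeeping, with illustrative ring size and bit values:

	#define NUM_TX 64			/* illustrative ring size */
	#define CTRL_READY 0x8000
	#define CTRL_WRAP  0x4000
	#define CTRL_LAST  0x1000

	struct desc { unsigned short ctrl, len; void *ptr; };

	static struct desc ring[NUM_TX];
	static int slot, cnt;

	/* Returns the slot used, or -1 when the ring is full */
	static int tx_enqueue(void *data, unsigned short len)
	{
		unsigned short ctrl = CTRL_READY | CTRL_LAST;
		int used = slot;

		if (cnt == NUM_TX)
			return -1;		/* caller stops the queue */
		if (used == NUM_TX - 1)
			ctrl |= CTRL_WRAP;	/* hardware wraps back to slot 0 */

		ring[used].ptr = data;
		ring[used].len = len;
		ring[used].ctrl = ctrl;		/* ownership passes to hardware */

		cnt++;
		slot = (used + 1) % NUM_TX;
		return used;
	}

The barrier() before the ctrl store in the new driver exists for the same reason the ctrl assignment comes last here: ownership must not pass to the device before data_ptr/data_len are visible.
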
1020 1048
1021static int emac_adjust_to_link(struct ocp_enet_private *fep) 1049/* BHs disabled */
1050static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1022{ 1051{
1023 emac_t *emacp = fep->emacp; 1052 struct ocp_enet_private *dev = ndev->priv;
1024 unsigned long mode_reg; 1053 unsigned int len = skb->len;
1025 int full_duplex, speed; 1054 int slot;
1026
1027 full_duplex = 0;
1028 speed = SPEED_10;
1029 1055
1030 /* set mode register 1 defaults */ 1056 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1031 mode_reg = EMAC_M1_DEFAULT; 1057 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1032 1058
1033 /* Read link mode on PHY */ 1059 slot = dev->tx_slot++;
1034 if (fep->phy_mii.def->ops->read_link(&fep->phy_mii) == 0) { 1060 if (dev->tx_slot == NUM_TX_BUFF) {
1035 /* If an error occurred, we don't deal with it yet */ 1061 dev->tx_slot = 0;
1036 full_duplex = (fep->phy_mii.duplex == DUPLEX_FULL); 1062 ctrl |= MAL_TX_CTRL_WRAP;
1037 speed = fep->phy_mii.speed;
1038 } 1063 }
1039 1064
1065 DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);
1040 1066
1041 /* set speed (default is 10Mb) */ 1067 dev->tx_skb[slot] = skb;
1042 switch (speed) { 1068 dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
1043 case SPEED_1000: 1069 DMA_TO_DEVICE);
1044 mode_reg |= EMAC_M1_RFS_16K; 1070 dev->tx_desc[slot].data_len = (u16) len;
1045 if (fep->rgmii_dev) { 1071 barrier();
1046 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(fep->rgmii_dev); 1072 dev->tx_desc[slot].ctrl = ctrl;
1047
1048 if ((rgmii->mode[fep->rgmii_input] == RTBI)
1049 || (rgmii->mode[fep->rgmii_input] == TBI))
1050 mode_reg |= EMAC_M1_MF_1000GPCS;
1051 else
1052 mode_reg |= EMAC_M1_MF_1000MBPS;
1053
1054 emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
1055 1000);
1056 }
1057 break;
1058 case SPEED_100:
1059 mode_reg |= EMAC_M1_MF_100MBPS | EMAC_M1_RFS_4K;
1060 if (fep->rgmii_dev)
1061 emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
1062 100);
1063 if (fep->zmii_dev)
1064 emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
1065 100);
1066 break;
1067 case SPEED_10:
1068 default:
1069 mode_reg = (mode_reg & ~EMAC_M1_MF_100MBPS) | EMAC_M1_RFS_4K;
1070 if (fep->rgmii_dev)
1071 emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
1072 10);
1073 if (fep->zmii_dev)
1074 emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
1075 10);
1076 }
1077
1078 if (full_duplex)
1079 mode_reg |= EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_IST;
1080 else
1081 mode_reg &= ~(EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_ILE);
1082
1083 LINK_DEBUG(("%s: adjust to link, speed: %d, duplex: %d, opened: %d\n",
1084 fep->ndev->name, speed, full_duplex, fep->opened));
1085 1073
1086 printk(KERN_INFO "%s: Speed: %d, %s duplex.\n", 1074 return emac_xmit_finish(dev, len);
1087 fep->ndev->name, speed, full_duplex ? "Full" : "Half");
1088 if (fep->opened)
1089 out_be32(&emacp->em0mr1, mode_reg);
1090
1091 return 0;
1092} 1075}
1093 1076
1094static int emac_set_mac_address(struct net_device *ndev, void *p) 1077#if defined(CONFIG_IBM_EMAC_TAH)
1078static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
1079 u32 pd, int len, int last, u16 base_ctrl)
1095{ 1080{
1096 struct ocp_enet_private *fep = ndev->priv; 1081 while (1) {
1097 emac_t *emacp = fep->emacp; 1082 u16 ctrl = base_ctrl;
1098 struct sockaddr *addr = p; 1083 int chunk = min(len, MAL_MAX_TX_SIZE);
1084 len -= chunk;
1099 1085
1100 if (!is_valid_ether_addr(addr->sa_data)) 1086 slot = (slot + 1) % NUM_TX_BUFF;
1101 return -EADDRNOTAVAIL;
1102 1087
1103 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 1088 if (last && !len)
1089 ctrl |= MAL_TX_CTRL_LAST;
1090 if (slot == NUM_TX_BUFF - 1)
1091 ctrl |= MAL_TX_CTRL_WRAP;
1104 1092
1105 /* set the high address */ 1093 dev->tx_skb[slot] = NULL;
1106 out_be32(&emacp->em0iahr, 1094 dev->tx_desc[slot].data_ptr = pd;
1107 (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]); 1095 dev->tx_desc[slot].data_len = (u16) chunk;
1096 dev->tx_desc[slot].ctrl = ctrl;
1097 ++dev->tx_cnt;
1108 1098
1109 /* set the low address */ 1099 if (!len)
1110 out_be32(&emacp->em0ialr, 1100 break;
1111 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
1112 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
1113 1101
1114 return 0; 1102 pd += chunk;
1103 }
1104 return slot;
1115} 1105}
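
emac_set_mac_address() in the left column splits the six-byte station address across two registers: the top two octets go to IAHR, the remaining four to IALR. The packing, as a standalone sketch:

	#include <stdint.h>
	#include <stdio.h>

	static void pack_mac(const uint8_t a[6], uint32_t *hi, uint32_t *lo)
	{
		*hi = (a[0] << 8) | a[1];
		*lo = ((uint32_t)a[2] << 24) | (a[3] << 16) | (a[4] << 8) | a[5];
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint32_t hi, lo;

		pack_mac(mac, &hi, &lo);
		/* Prints IAHR=0011 IALR=22334455 */
		printf("IAHR=%04x IALR=%08x\n", (unsigned)hi, (unsigned)lo);
		return 0;
	}
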
1116 1106
1117static int emac_change_mtu(struct net_device *dev, int new_mtu) 1107/* BHs disabled (SG version for TAH equipped EMACs) */
1108static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1118{ 1109{
1119 struct ocp_enet_private *fep = dev->priv; 1110 struct ocp_enet_private *dev = ndev->priv;
1120 int old_mtu = dev->mtu; 1111 int nr_frags = skb_shinfo(skb)->nr_frags;
1121 unsigned long mode_reg; 1112 int len = skb->len, chunk;
1122 emac_t *emacp = fep->emacp; 1113 int slot, i;
1123 u32 em0mr0; 1114 u16 ctrl;
1124 int i, full; 1115 u32 pd;
1125 unsigned long flags;
1126 1116
1127 if ((new_mtu < EMAC_MIN_MTU) || (new_mtu > EMAC_MAX_MTU)) { 1117 /* This is common "fast" path */
1128 printk(KERN_ERR 1118 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1129 "emac: Invalid MTU setting, MTU must be between %d and %d\n", 1119 return emac_start_xmit(skb, ndev);
1130 EMAC_MIN_MTU, EMAC_MAX_MTU);
1131 return -EINVAL;
1132 }
1133 1120
1134 if (old_mtu != new_mtu && netif_running(dev)) { 1121 len -= skb->data_len;
1135 /* Stop rx engine */
1136 em0mr0 = in_be32(&emacp->em0mr0);
1137 out_be32(&emacp->em0mr0, em0mr0 & ~EMAC_M0_RXE);
1138
1139 /* Wait for descriptors to be empty */
1140 do {
1141 full = 0;
1142 for (i = 0; i < NUM_RX_BUFF; i++)
1143 if (!(fep->rx_desc[i].ctrl & MAL_RX_CTRL_EMPTY)) {
1144 printk(KERN_NOTICE
1145 "emac: RX ring is still full\n");
1146 full = 1;
1147 }
1148 } while (full);
1149
1150 spin_lock_irqsave(&fep->lock, flags);
1151
1152 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
1153
1154 /* Destroy all old rx skbs */
1155 for (i = 0; i < NUM_RX_BUFF; i++) {
1156 dma_unmap_single(&fep->ocpdev->dev,
1157 fep->rx_desc[i].data_ptr,
1158 fep->rx_desc[i].data_len,
1159 DMA_FROM_DEVICE);
1160 dev_kfree_skb(fep->rx_skb[i]);
1161 fep->rx_skb[i] = NULL;
1162 }
1163 1122
1164 /* Set new rx_buffer_size, jumbo cap, and advertise new mtu */ 1123 /* Note, this is only an *estimation*, we can still run out of empty
1165 mode_reg = in_be32(&emacp->em0mr1); 1124 * slots because of the additional fragmentation into
1166 if (new_mtu > ENET_DEF_MTU_SIZE) { 1125 * MAL_MAX_TX_SIZE-sized chunks
1167 mode_reg |= EMAC_M1_JUMBO_ENABLE; 1126 */
1168 fep->rx_buffer_size = EMAC_MAX_FRAME; 1127 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1169 } else { 1128 goto stop_queue;
1170 mode_reg &= ~EMAC_M1_JUMBO_ENABLE; 1129
1171 fep->rx_buffer_size = ENET_DEF_BUF_SIZE; 1130 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1172 } 1131 emac_tx_csum(dev, skb);
1173 dev->mtu = new_mtu; 1132 slot = dev->tx_slot;
1174 out_be32(&emacp->em0mr1, mode_reg); 1133
1134 /* skb data */
1135 dev->tx_skb[slot] = NULL;
1136 chunk = min(len, MAL_MAX_TX_SIZE);
1137 dev->tx_desc[slot].data_ptr = pd =
1138 dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
1139 dev->tx_desc[slot].data_len = (u16) chunk;
1140 len -= chunk;
1141 if (unlikely(len))
1142 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1143 ctrl);
1144 /* skb fragments */
1145 for (i = 0; i < nr_frags; ++i) {
1146 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1147 len = frag->size;
1175 1148
1176 /* Re-init rx skbs */ 1149 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1177 fep->rx_slot = 0; 1150 goto undo_frame;
1178 emac_rx_fill(dev, 0);
1179 1151
1180 /* Restart the rx engine */ 1152 pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
1181 mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 1153 DMA_TO_DEVICE);
1182 out_be32(&emacp->em0mr0, em0mr0 | EMAC_M0_RXE);
1183 1154
1184 spin_unlock_irqrestore(&fep->lock, flags); 1155 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1156 ctrl);
1185 } 1157 }
1186 1158
1187 return 0; 1159 DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
1188} 1160 dev->tx_slot, slot);
1189 1161
1190static void __emac_set_multicast_list(struct net_device *dev) 1162 /* Attach skb to the last slot so we don't release it too early */
1191{ 1163 dev->tx_skb[slot] = skb;
1192 struct ocp_enet_private *fep = dev->priv;
1193 emac_t *emacp = fep->emacp;
1194 u32 rmr = in_be32(&emacp->em0rmr);
1195 1164
1196 /* First clear all special bits, they can be set later */ 1165 /* Send the packet out */
1197 rmr &= ~(EMAC_RMR_PME | EMAC_RMR_PMME | EMAC_RMR_MAE); 1166 if (dev->tx_slot == NUM_TX_BUFF - 1)
1167 ctrl |= MAL_TX_CTRL_WRAP;
1168 barrier();
1169 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1170 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1198 1171
1199 if (dev->flags & IFF_PROMISC) { 1172 return emac_xmit_finish(dev, skb->len);
1200 rmr |= EMAC_RMR_PME;
1201 } else if (dev->flags & IFF_ALLMULTI || 32 < dev->mc_count) {
1202 /*
1203 * Must be setting up to use multicast
1204 * Now check for promiscuous multicast
1205 */
1206 rmr |= EMAC_RMR_PMME;
1207 } else if (dev->flags & IFF_MULTICAST && 0 < dev->mc_count) {
1208 unsigned short em0gaht[4] = { 0, 0, 0, 0 };
1209 struct dev_mc_list *dmi;
1210
1211 /* Need to hash on the multicast address. */
1212 for (dmi = dev->mc_list; dmi; dmi = dmi->next) {
1213 unsigned long mc_crc;
1214 unsigned int bit_number;
1215
1216 mc_crc = ether_crc(6, (char *)dmi->dmi_addr);
1217 bit_number = 63 - (mc_crc >> 26); /* MSB: 0 LSB: 63 */
1218 em0gaht[bit_number >> 4] |=
1219 0x8000 >> (bit_number & 0x0f);
1220 }
1221 emacp->em0gaht1 = em0gaht[0];
1222 emacp->em0gaht2 = em0gaht[1];
1223 emacp->em0gaht3 = em0gaht[2];
1224 emacp->em0gaht4 = em0gaht[3];
1225 1173
1226 /* Turn on multicast addressing */ 1174 undo_frame:
1227 rmr |= EMAC_RMR_MAE; 1175 /* Well, too bad. Our previous estimation was overly optimistic.
1176 * Undo everything.
1177 */
1178 while (slot != dev->tx_slot) {
1179 dev->tx_desc[slot].ctrl = 0;
1180 --dev->tx_cnt;
1181 if (--slot < 0)
1182 slot = NUM_TX_BUFF - 1;
1228 } 1183 }
1229 out_be32(&emacp->em0rmr, rmr); 1184 ++dev->estats.tx_undo;
1185
1186 stop_queue:
1187 netif_stop_queue(ndev);
1188 DBG2("%d: stopped TX queue" NL, dev->def->index);
1189 return 1;
1230} 1190}
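
The multicast filter in __emac_set_multicast_list() (left column) hashes each group address with the Ethernet CRC and sets one of 64 bits spread across four 16-bit GAHT registers. The index math in isolation (computing the CRC itself, the kernel's ether_crc(), is elided here):

	#include <stdint.h>

	/* Map a 32-bit Ethernet CRC of the multicast address onto the
	 * 4 x 16-bit group address hash table, as in the driver above. */
	static void gaht_set(uint16_t gaht[4], uint32_t mc_crc)
	{
		unsigned int bit_number = 63 - (mc_crc >> 26);	/* MSB: 0, LSB: 63 */

		gaht[bit_number >> 4] |= 0x8000u >> (bit_number & 0x0f);
	}

Bit 0 corresponds to the CRC's six most significant bits being all ones, hence the 63 - (crc >> 26) inversion noted in the driver's comment.
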
1191#else
1192# define emac_start_xmit_sg emac_start_xmit
1193#endif /* !defined(CONFIG_IBM_EMAC_TAH) */
1231 1194
1232static int emac_init_tah(struct ocp_enet_private *fep) 1195/* BHs disabled */
1196static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
1233{ 1197{
1234 tah_t *tahp; 1198 struct ibm_emac_error_stats *st = &dev->estats;
1199 DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
1200
1201 ++st->tx_bd_errors;
1202 if (ctrl & EMAC_TX_ST_BFCS)
1203 ++st->tx_bd_bad_fcs;
1204 if (ctrl & EMAC_TX_ST_LCS)
1205 ++st->tx_bd_carrier_loss;
1206 if (ctrl & EMAC_TX_ST_ED)
1207 ++st->tx_bd_excessive_deferral;
1208 if (ctrl & EMAC_TX_ST_EC)
1209 ++st->tx_bd_excessive_collisions;
1210 if (ctrl & EMAC_TX_ST_LC)
1211 ++st->tx_bd_late_collision;
1212 if (ctrl & EMAC_TX_ST_MC)
1213 ++st->tx_bd_multple_collisions;
1214 if (ctrl & EMAC_TX_ST_SC)
1215 ++st->tx_bd_single_collision;
1216 if (ctrl & EMAC_TX_ST_UR)
1217 ++st->tx_bd_underrun;
1218 if (ctrl & EMAC_TX_ST_SQE)
1219 ++st->tx_bd_sqe;
1220}
1235 1221
1236 /* Initialize TAH and enable checksum verification */ 1222static void emac_poll_tx(void *param)
1237 tahp = (tah_t *) ioremap(fep->tah_dev->def->paddr, sizeof(*tahp)); 1223{
1224 struct ocp_enet_private *dev = param;
1225 DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
1226 dev->ack_slot);
1227
1228 if (dev->tx_cnt) {
1229 u16 ctrl;
1230 int slot = dev->ack_slot, n = 0;
1231 again:
1232 ctrl = dev->tx_desc[slot].ctrl;
1233 if (!(ctrl & MAL_TX_CTRL_READY)) {
1234 struct sk_buff *skb = dev->tx_skb[slot];
1235 ++n;
1236
1237 if (skb) {
1238 dev_kfree_skb(skb);
1239 dev->tx_skb[slot] = NULL;
1240 }
1241 slot = (slot + 1) % NUM_TX_BUFF;
1238 1242
1239 if (tahp == NULL) { 1243 if (unlikely(EMAC_IS_BAD_TX(ctrl)))
1240 printk(KERN_ERR "tah%d: Cannot ioremap TAH registers!\n", 1244 emac_parse_tx_error(dev, ctrl);
1241 fep->tah_dev->def->index);
1242 1245
1243 return -ENOMEM; 1246 if (--dev->tx_cnt)
1244 } 1247 goto again;
1245 1248 }
1246 out_be32(&tahp->tah_mr, TAH_MR_SR); 1249 if (n) {
1250 dev->ack_slot = slot;
1251 if (netif_queue_stopped(dev->ndev) &&
1252 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1253 netif_wake_queue(dev->ndev);
1247 1254
1248 /* wait for reset to complete */ 1255 DBG2("%d: tx %d pkts" NL, dev->def->index, n);
1249 while (in_be32(&tahp->tah_mr) & TAH_MR_SR) ; 1256 }
1257 }
1258}
1250 1259
1251 /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */ 1260 static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
1252 out_be32(&tahp->tah_mr, 1261 int len)
1253 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP | 1262{
1254 TAH_MR_DIG); 1263 struct sk_buff *skb = dev->rx_skb[slot];
1264 DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);
1255 1265
1256 iounmap(tahp); 1266 if (len)
1267 dma_map_single(dev->ldev, skb->data - 2,
1268 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1257 1269
1258 return 0; 1270 dev->rx_desc[slot].data_len = 0;
1271 barrier();
1272 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1273 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1259} 1274}
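
emac_recycle_rx_skb() above hands a buffer back to the hardware: the length is cleared first, then a barrier, then the ctrl word with the EMPTY bit is stored last, so the device never sees a half-updated descriptor. The ordering in miniature (barrier() is spelled out as the GCC compiler barrier; a real driver may need a stronger memory barrier on some platforms):

	#define barrier() __asm__ __volatile__("" ::: "memory")

	#define CTRL_EMPTY 0x8000	/* illustrative bit values */
	#define CTRL_WRAP  0x4000

	struct rx_desc { unsigned short ctrl, len; };

	static void recycle(struct rx_desc *d, int is_last_slot)
	{
		d->len = 0;
		barrier();		/* order the len write before ctrl */
		d->ctrl = CTRL_EMPTY | (is_last_slot ? CTRL_WRAP : 0);
	}
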
1260 1275
1261static void emac_init_rings(struct net_device *dev) 1276static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
1262{ 1277{
1263 struct ocp_enet_private *ep = dev->priv; 1278 struct ibm_emac_error_stats *st = &dev->estats;
1264 int loop; 1279 DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
1280
1281 ++st->rx_bd_errors;
1282 if (ctrl & EMAC_RX_ST_OE)
1283 ++st->rx_bd_overrun;
1284 if (ctrl & EMAC_RX_ST_BP)
1285 ++st->rx_bd_bad_packet;
1286 if (ctrl & EMAC_RX_ST_RP)
1287 ++st->rx_bd_runt_packet;
1288 if (ctrl & EMAC_RX_ST_SE)
1289 ++st->rx_bd_short_event;
1290 if (ctrl & EMAC_RX_ST_AE)
1291 ++st->rx_bd_alignment_error;
1292 if (ctrl & EMAC_RX_ST_BFCS)
1293 ++st->rx_bd_bad_fcs;
1294 if (ctrl & EMAC_RX_ST_PTL)
1295 ++st->rx_bd_packet_too_long;
1296 if (ctrl & EMAC_RX_ST_ORE)
1297 ++st->rx_bd_out_of_range;
1298 if (ctrl & EMAC_RX_ST_IRE)
1299 ++st->rx_bd_in_range;
1300}
1265 1301
1266 ep->tx_desc = (struct mal_descriptor *)((char *)ep->mal->tx_virt_addr + 1302static inline void emac_rx_csum(struct ocp_enet_private *dev,
1267 (ep->mal_tx_chan * 1303 struct sk_buff *skb, u16 ctrl)
1268 MAL_DT_ALIGN)); 1304{
1269 ep->rx_desc = 1305#if defined(CONFIG_IBM_EMAC_TAH)
1270 (struct mal_descriptor *)((char *)ep->mal->rx_virt_addr + 1306 if (!ctrl && dev->tah_dev) {
1271 (ep->mal_rx_chan * MAL_DT_ALIGN)); 1307 skb->ip_summed = CHECKSUM_UNNECESSARY;
1308 ++dev->stats.rx_packets_csum;
1309 }
1310#endif
1311}
1272 1312
1273 /* Fill in the transmit descriptor ring. */ 1313static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
1274 for (loop = 0; loop < NUM_TX_BUFF; loop++) { 1314{
1275 if (ep->tx_skb[loop]) { 1315 if (likely(dev->rx_sg_skb != NULL)) {
1276 dma_unmap_single(&ep->ocpdev->dev, 1316 int len = dev->rx_desc[slot].data_len;
1277 ep->tx_desc[loop].data_ptr, 1317 int tot_len = dev->rx_sg_skb->len + len;
1278 ep->tx_desc[loop].data_len, 1318
1279 DMA_TO_DEVICE); 1319 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1280 dev_kfree_skb_irq(ep->tx_skb[loop]); 1320 ++dev->estats.rx_dropped_mtu;
1321 dev_kfree_skb(dev->rx_sg_skb);
1322 dev->rx_sg_skb = NULL;
1323 } else {
1324 cacheable_memcpy(dev->rx_sg_skb->tail,
1325 dev->rx_skb[slot]->data, len);
1326 skb_put(dev->rx_sg_skb, len);
1327 emac_recycle_rx_skb(dev, slot, len);
1328 return 0;
1281 } 1329 }
1282 ep->tx_skb[loop] = NULL;
1283 ep->tx_desc[loop].ctrl = 0;
1284 ep->tx_desc[loop].data_len = 0;
1285 ep->tx_desc[loop].data_ptr = NULL;
1286 }
1287 ep->tx_desc[loop - 1].ctrl |= MAL_TX_CTRL_WRAP;
1288
1289 /* Format the receive descriptor ring. */
1290 ep->rx_slot = 0;
1291 /* Default is MTU=1500 + Ethernet overhead */
1292 ep->rx_buffer_size = dev->mtu + ENET_HEADER_SIZE + ENET_FCS_SIZE;
1293 emac_rx_fill(dev, 0);
1294 if (ep->rx_slot != 0) {
1295 printk(KERN_ERR
1296 "%s: Not enough mem for RxChain durning Open?\n",
1297 dev->name);
1298 /* We couldn't fill the ring at startup?
1299 * We could clean up and fail to open, but for now we try to
1300 * carry on. It may be a sign of a bad NUM_RX_BUFF value.
1301 */
1302 } 1330 }
1303 1331 emac_recycle_rx_skb(dev, slot, 0);
1304 ep->tx_cnt = 0; 1332 return -1;
1305 ep->tx_slot = 0;
1306 ep->ack_slot = 0;
1307} 1333}
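
emac_rx_sg_append() above grows a frame under scatter-gather reassembly, dropping it once it would exceed the receive buffer size. The same accumulate-or-drop logic as a self-contained sketch (buffer size is illustrative; the caller sets active when the FIRST descriptor of a frame arrives):

	#include <string.h>

	struct rx_reasm {
		char buf[9018];		/* illustrative: jumbo frame + overhead */
		int len;
		int active;		/* set on the frame's FIRST descriptor */
	};

	/* Append one RX descriptor's payload to a frame under reassembly.
	 * Returns 0 on success, -1 when the frame overflows the buffer and
	 * must be dropped, mirroring emac_rx_sg_append() above. */
	static int rx_append(struct rx_reasm *r, const void *data, int len)
	{
		if (!r->active || r->len + len > (int)sizeof(r->buf)) {
			r->active = 0;
			return -1;
		}
		memcpy(r->buf + r->len, data, len);
		r->len += len;
		return 0;
	}
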
1308 1334
1309static void emac_reset_configure(struct ocp_enet_private *fep) 1335/* BHs disabled */
1336static int emac_poll_rx(void *param, int budget)
1310{ 1337{
1311 emac_t *emacp = fep->emacp; 1338 struct ocp_enet_private *dev = param;
1312 int i; 1339 int slot = dev->rx_slot, received = 0;
1313
1314 mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
1315 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
1316 1340
1317 /* 1341 DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);
1318 * Check for a link, some PHYs don't provide a clock if
1319 * no link is present. Some EMACs will not come out of
1320 * soft reset without a PHY clock present.
1321 */
1322 if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
1323 /* Reset the EMAC */
1324 out_be32(&emacp->em0mr0, EMAC_M0_SRST);
1325 udelay(20);
1326 for (i = 0; i < 100; i++) {
1327 if ((in_be32(&emacp->em0mr0) & EMAC_M0_SRST) == 0)
1328 break;
1329 udelay(10);
1330 }
1331 1342
1332 if (i >= 100) { 1343 again:
1333 printk(KERN_ERR "%s: Cannot reset EMAC\n", 1344 while (budget > 0) {
1334 fep->ndev->name); 1345 int len;
1335 return; 1346 struct sk_buff *skb;
1336 } 1347 u16 ctrl = dev->rx_desc[slot].ctrl;
1337 }
1338 1348
1339 /* Switch IRQs off for now */ 1349 if (ctrl & MAL_RX_CTRL_EMPTY)
1340 out_be32(&emacp->em0iser, 0); 1350 break;
1341
1342 /* Configure MAL rx channel */
1343 mal_set_rcbs(fep->mal, fep->mal_rx_chan, DESC_BUF_SIZE_REG);
1344 1351
1345 /* set the high address */ 1352 skb = dev->rx_skb[slot];
1346 out_be32(&emacp->em0iahr, 1353 barrier();
1347 (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]); 1354 len = dev->rx_desc[slot].data_len;
1348 1355
1349 /* set the low address */ 1356 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1350 out_be32(&emacp->em0ialr, 1357 goto sg;
1351 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
1352 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
1353 1358
1354 /* Adjust to link */ 1359 ctrl &= EMAC_BAD_RX_MASK;
1355 if (netif_carrier_ok(fep->ndev)) 1360 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1356 emac_adjust_to_link(fep); 1361 emac_parse_rx_error(dev, ctrl);
1362 ++dev->estats.rx_dropped_error;
1363 emac_recycle_rx_skb(dev, slot, 0);
1364 len = 0;
1365 goto next;
1366 }
1357 1367
1358 /* enable broadcast/individual address and RX FIFO defaults */ 1368 if (len && len < EMAC_RX_COPY_THRESH) {
1359 out_be32(&emacp->em0rmr, EMAC_RMR_DEFAULT); 1369 struct sk_buff *copy_skb =
1370 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1371 if (unlikely(!copy_skb))
1372 goto oom;
1373
1374 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1375 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1376 len + 2);
1377 emac_recycle_rx_skb(dev, slot, len);
1378 skb = copy_skb;
1379 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1380 goto oom;
1381
1382 skb_put(skb, len);
1383 push_packet:
1384 skb->dev = dev->ndev;
1385 skb->protocol = eth_type_trans(skb, dev->ndev);
1386 emac_rx_csum(dev, skb, ctrl);
1387
1388 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1389 ++dev->estats.rx_dropped_stack;
1390 next:
1391 ++dev->stats.rx_packets;
1392 skip:
1393 dev->stats.rx_bytes += len;
1394 slot = (slot + 1) % NUM_RX_BUFF;
1395 --budget;
1396 ++received;
1397 continue;
1398 sg:
1399 if (ctrl & MAL_RX_CTRL_FIRST) {
1400 BUG_ON(dev->rx_sg_skb);
1401 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1402 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1403 ++dev->estats.rx_dropped_oom;
1404 emac_recycle_rx_skb(dev, slot, 0);
1405 } else {
1406 dev->rx_sg_skb = skb;
1407 skb_put(skb, len);
1408 }
1409 } else if (!emac_rx_sg_append(dev, slot) &&
1410 (ctrl & MAL_RX_CTRL_LAST)) {
1411
1412 skb = dev->rx_sg_skb;
1413 dev->rx_sg_skb = NULL;
1414
1415 ctrl &= EMAC_BAD_RX_MASK;
1416 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1417 emac_parse_rx_error(dev, ctrl);
1418 ++dev->estats.rx_dropped_error;
1419 dev_kfree_skb(skb);
1420 len = 0;
1421 } else
1422 goto push_packet;
1423 }
1424 goto skip;
1425 oom:
1426 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1427 /* Drop the packet and recycle skb */
1428 ++dev->estats.rx_dropped_oom;
1429 emac_recycle_rx_skb(dev, slot, 0);
1430 goto next;
1431 }
1360 1432
1361 /* set transmit request threshold register */ 1433 if (received) {
1362 out_be32(&emacp->em0trtr, EMAC_TRTR_DEFAULT); 1434 DBG2("%d: rx %d BDs" NL, dev->def->index, received);
1435 dev->rx_slot = slot;
1436 }
1363 1437
1364 /* Reconfigure multicast */ 1438 if (unlikely(budget && dev->commac.rx_stopped)) {
1365 __emac_set_multicast_list(fep->ndev); 1439 struct ocp_func_emac_data *emacdata = dev->def->additions;
1366 1440
1367 /* Set receiver/transmitter defaults */ 1441 barrier();
1368 out_be32(&emacp->em0rwmr, EMAC_RWMR_DEFAULT); 1442 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1369 out_be32(&emacp->em0tmr0, EMAC_TMR0_DEFAULT); 1443 DBG2("%d: rx restart" NL, dev->def->index);
1370 out_be32(&emacp->em0tmr1, EMAC_TMR1_DEFAULT); 1444 received = 0;
1445 goto again;
1446 }
1371 1447
1372 /* set frame gap */ 1448 if (dev->rx_sg_skb) {
1373 out_be32(&emacp->em0ipgvr, CONFIG_IBM_EMAC_FGAP); 1449 DBG2("%d: dropping partial rx packet" NL,
1374 1450 dev->def->index);
1375 /* set VLAN Tag Protocol Identifier */ 1451 ++dev->estats.rx_dropped_error;
1376 out_be32(&emacp->em0vtpid, 0x8100); 1452 dev_kfree_skb(dev->rx_sg_skb);
1453 dev->rx_sg_skb = NULL;
1454 }
1377 1455
1378 /* Init ring buffers */ 1456 dev->commac.rx_stopped = 0;
1379 emac_init_rings(fep->ndev); 1457 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
1458 emac_rx_enable(dev);
1459 dev->rx_slot = 0;
1460 }
1461 return received;
1380} 1462}
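
emac_poll_rx() follows the classic budgeted-poll shape: consume at most budget descriptors, stop at the first EMPTY one, and report how many were handled so the MAL poll layer can decide whether to re-arm interrupts. The skeleton with the EMAC specifics stripped away:

	#define NUM_RX 64			/* illustrative ring size */

	struct rx_desc { int ready; };		/* ready: packet waiting */

	static struct rx_desc rx_ring[NUM_RX];

	static void deliver(int slot)
	{
		rx_ring[slot].ready = 0;	/* consume + recycle the slot */
	}

	static int poll_rx(int *slotp, int budget)
	{
		int slot = *slotp, received = 0;

		while (budget > 0 && rx_ring[slot].ready) {
			deliver(slot);			/* push packet up the stack */
			slot = (slot + 1) % NUM_RX;	/* ring advance */
			--budget;
			++received;
		}
		*slotp = slot;
		return received;	/* < initial budget: the ring drained */
	}
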
1381 1463
1382static void emac_kick(struct ocp_enet_private *fep) 1464/* BHs disabled */
1465static int emac_peek_rx(void *param)
1383{ 1466{
1384 emac_t *emacp = fep->emacp; 1467 struct ocp_enet_private *dev = param;
1385 unsigned long emac_ier; 1468 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1386 1469}
1387 emac_ier = EMAC_ISR_PP | EMAC_ISR_BP | EMAC_ISR_RP |
1388 EMAC_ISR_SE | EMAC_ISR_PTLE | EMAC_ISR_ALE |
1389 EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
1390 1470
1391 out_be32(&emacp->em0iser, emac_ier); 1471/* BHs disabled */
1472static int emac_peek_rx_sg(void *param)
1473{
1474 struct ocp_enet_private *dev = param;
1475 int slot = dev->rx_slot;
1476 while (1) {
1477 u16 ctrl = dev->rx_desc[slot].ctrl;
1478 if (ctrl & MAL_RX_CTRL_EMPTY)
1479 return 0;
1480 else if (ctrl & MAL_RX_CTRL_LAST)
1481 return 1;
1392 1482
1393 /* enable all MAL transmit and receive channels */ 1483 slot = (slot + 1) % NUM_RX_BUFF;
1394 mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
1395 mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
1396 1484
1397 /* set transmit and receive enable */ 1485 /* I'm just being paranoid here :) */
1398 out_be32(&emacp->em0mr0, EMAC_M0_TXE | EMAC_M0_RXE); 1486 if (unlikely(slot == dev->rx_slot))
1487 return 0;
1488 }
1399} 1489}
1400 1490
1401static void 1491/* Hard IRQ */
1402emac_start_link(struct ocp_enet_private *fep, struct ethtool_cmd *ep) 1492static void emac_rxde(void *param)
1403{ 1493{
1404 u32 advertise; 1494 struct ocp_enet_private *dev = param;
1405 int autoneg; 1495 ++dev->estats.rx_stopped;
1406 int forced_speed; 1496 emac_rx_disable_async(dev);
1407 int forced_duplex; 1497}
1408 1498
1409 /* Default advertise */ 1499/* Hard IRQ */
1410 advertise = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 1500static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
1411 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 1501{
1412 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full; 1502 struct ocp_enet_private *dev = dev_instance;
1413 autoneg = fep->want_autoneg; 1503 struct emac_regs *p = dev->emacp;
1414 forced_speed = fep->phy_mii.speed; 1504 struct ibm_emac_error_stats *st = &dev->estats;
1415 forced_duplex = fep->phy_mii.duplex; 1505
1506 u32 isr = in_be32(&p->isr);
1507 out_be32(&p->isr, isr);
1508
1509 DBG("%d: isr = %08x" NL, dev->def->index, isr);
1510
1511 if (isr & EMAC_ISR_TXPE)
1512 ++st->tx_parity;
1513 if (isr & EMAC_ISR_RXPE)
1514 ++st->rx_parity;
1515 if (isr & EMAC_ISR_TXUE)
1516 ++st->tx_underrun;
1517 if (isr & EMAC_ISR_RXOE)
1518 ++st->rx_fifo_overrun;
1519 if (isr & EMAC_ISR_OVR)
1520 ++st->rx_overrun;
1521 if (isr & EMAC_ISR_BP)
1522 ++st->rx_bad_packet;
1523 if (isr & EMAC_ISR_RP)
1524 ++st->rx_runt_packet;
1525 if (isr & EMAC_ISR_SE)
1526 ++st->rx_short_event;
1527 if (isr & EMAC_ISR_ALE)
1528 ++st->rx_alignment_error;
1529 if (isr & EMAC_ISR_BFCS)
1530 ++st->rx_bad_fcs;
1531 if (isr & EMAC_ISR_PTLE)
1532 ++st->rx_packet_too_long;
1533 if (isr & EMAC_ISR_ORE)
1534 ++st->rx_out_of_range;
1535 if (isr & EMAC_ISR_IRE)
1536 ++st->rx_in_range;
1537 if (isr & EMAC_ISR_SQE)
1538 ++st->tx_sqe;
1539 if (isr & EMAC_ISR_TE)
1540 ++st->tx_errors;
1416 1541
1417 /* Setup link parameters */ 1542 return IRQ_HANDLED;
1418 if (ep) { 1543}
1419 if (ep->autoneg == AUTONEG_ENABLE) {
1420 advertise = ep->advertising;
1421 autoneg = 1;
1422 } else {
1423 autoneg = 0;
1424 forced_speed = ep->speed;
1425 forced_duplex = ep->duplex;
1426 }
1427 }
1428 1544
1429 /* Configure PHY & start aneg */ 1545static struct net_device_stats *emac_stats(struct net_device *ndev)
1430 fep->want_autoneg = autoneg; 1546{
1431 if (autoneg) { 1547 struct ocp_enet_private *dev = ndev->priv;
1432 LINK_DEBUG(("%s: start link aneg, advertise: 0x%x\n", 1548 struct ibm_emac_stats *st = &dev->stats;
1433 fep->ndev->name, advertise)); 1549 struct ibm_emac_error_stats *est = &dev->estats;
1434 fep->phy_mii.def->ops->setup_aneg(&fep->phy_mii, advertise); 1550 struct net_device_stats *nst = &dev->nstats;
1435 } else { 1551
1436 LINK_DEBUG(("%s: start link forced, speed: %d, duplex: %d\n", 1552 DBG2("%d: stats" NL, dev->def->index);
1437 fep->ndev->name, forced_speed, forced_duplex)); 1553
1438 fep->phy_mii.def->ops->setup_forced(&fep->phy_mii, forced_speed, 1554 /* Compute "legacy" statistics */
1439 forced_duplex); 1555 local_irq_disable();
1440 } 1556 nst->rx_packets = (unsigned long)st->rx_packets;
1441 fep->timer_ticks = 0; 1557 nst->rx_bytes = (unsigned long)st->rx_bytes;
1442 mod_timer(&fep->link_timer, jiffies + HZ); 1558 nst->tx_packets = (unsigned long)st->tx_packets;
1559 nst->tx_bytes = (unsigned long)st->tx_bytes;
1560 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1561 est->rx_dropped_error +
1562 est->rx_dropped_resize +
1563 est->rx_dropped_mtu);
1564 nst->tx_dropped = (unsigned long)est->tx_dropped;
1565
1566 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1567 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1568 est->rx_fifo_overrun +
1569 est->rx_overrun);
1570 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1571 est->rx_alignment_error);
1572 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1573 est->rx_bad_fcs);
1574 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1575 est->rx_bd_short_event +
1576 est->rx_bd_packet_too_long +
1577 est->rx_bd_out_of_range +
1578 est->rx_bd_in_range +
1579 est->rx_runt_packet +
1580 est->rx_short_event +
1581 est->rx_packet_too_long +
1582 est->rx_out_of_range +
1583 est->rx_in_range);
1584
1585 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1586 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1587 est->tx_underrun);
1588 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1589 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1590 est->tx_bd_excessive_collisions +
1591 est->tx_bd_late_collision +
1592 est->tx_bd_multple_collisions);
1593 local_irq_enable();
1594 return nst;
1443} 1595}
1444 1596
1445static void emac_link_timer(unsigned long data) 1597static void emac_remove(struct ocp_device *ocpdev)
1446{ 1598{
1447 struct ocp_enet_private *fep = (struct ocp_enet_private *)data; 1599 struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
1448 int link;
1449 1600
1450 if (fep->going_away) 1601 DBG("%d: remove" NL, dev->def->index);
1451 return;
1452 1602
1453 spin_lock_irq(&fep->lock); 1603 ocp_set_drvdata(ocpdev, 0);
1604 unregister_netdev(dev->ndev);
1454 1605
1455 link = fep->phy_mii.def->ops->poll_link(&fep->phy_mii); 1606 tah_fini(dev->tah_dev);
1456 LINK_DEBUG(("%s: poll_link: %d\n", fep->ndev->name, link)); 1607 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1608 zmii_fini(dev->zmii_dev, dev->zmii_input);
1457 1609
1458 if (link == netif_carrier_ok(fep->ndev)) { 1610 emac_dbg_register(dev->def->index, 0);
1459 if (!link && fep->want_autoneg && (++fep->timer_ticks) > 10) 1611
1460 emac_start_link(fep, NULL); 1612 mal_unregister_commac(dev->mal, &dev->commac);
1461 goto out; 1613 iounmap((void *)dev->emacp);
1462 } 1614 kfree(dev->ndev);
1463 printk(KERN_INFO "%s: Link is %s\n", fep->ndev->name,
1464 link ? "Up" : "Down");
1465 if (link) {
1466 netif_carrier_on(fep->ndev);
1467 /* Chip needs a full reset on config change. That sucks, so I
1468 * should ultimately move that to some tasklet to limit
1469 * latency peaks caused by this code
1470 */
1471 emac_reset_configure(fep);
1472 if (fep->opened)
1473 emac_kick(fep);
1474 } else {
1475 fep->timer_ticks = 0;
1476 netif_carrier_off(fep->ndev);
1477 }
1478 out:
1479 mod_timer(&fep->link_timer, jiffies + HZ);
1480 spin_unlock_irq(&fep->lock);
1481} 1615}
1482 1616
1483static void emac_set_multicast_list(struct net_device *dev) 1617static struct mal_commac_ops emac_commac_ops = {
1484{ 1618 .poll_tx = &emac_poll_tx,
1485 struct ocp_enet_private *fep = dev->priv; 1619 .poll_rx = &emac_poll_rx,
1620 .peek_rx = &emac_peek_rx,
1621 .rxde = &emac_rxde,
1622};
1486 1623
1487 spin_lock_irq(&fep->lock); 1624static struct mal_commac_ops emac_commac_sg_ops = {
1488 __emac_set_multicast_list(dev); 1625 .poll_tx = &emac_poll_tx,
1489 spin_unlock_irq(&fep->lock); 1626 .poll_rx = &emac_poll_rx,
1490} 1627 .peek_rx = &emac_peek_rx_sg,
1628 .rxde = &emac_rxde,
1629};
1491 1630
1492static int emac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 1631/* Ethtool support */
1632static int emac_ethtool_get_settings(struct net_device *ndev,
1633 struct ethtool_cmd *cmd)
1493{ 1634{
1494 struct ocp_enet_private *fep = ndev->priv; 1635 struct ocp_enet_private *dev = ndev->priv;
1495 1636
1496 cmd->supported = fep->phy_mii.def->features; 1637 cmd->supported = dev->phy.features;
1497 cmd->port = PORT_MII; 1638 cmd->port = PORT_MII;
1498 cmd->transceiver = XCVR_EXTERNAL; 1639 cmd->phy_address = dev->phy.address;
1499 cmd->phy_address = fep->mii_phy_addr; 1640 cmd->transceiver =
1500 spin_lock_irq(&fep->lock); 1641 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1501 cmd->autoneg = fep->want_autoneg; 1642
1502 cmd->speed = fep->phy_mii.speed; 1643 local_bh_disable();
1503 cmd->duplex = fep->phy_mii.duplex; 1644 cmd->advertising = dev->phy.advertising;
1504 spin_unlock_irq(&fep->lock); 1645 cmd->autoneg = dev->phy.autoneg;
1646 cmd->speed = dev->phy.speed;
1647 cmd->duplex = dev->phy.duplex;
1648 local_bh_enable();
1649
1505 return 0; 1650 return 0;
1506} 1651}
1507 1652
1508static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 1653static int emac_ethtool_set_settings(struct net_device *ndev,
1654 struct ethtool_cmd *cmd)
1509{ 1655{
1510 struct ocp_enet_private *fep = ndev->priv; 1656 struct ocp_enet_private *dev = ndev->priv;
1511 unsigned long features = fep->phy_mii.def->features; 1657 u32 f = dev->phy.features;
1512 1658
1513 if (!capable(CAP_NET_ADMIN)) 1659 DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1514 return -EPERM; 1660 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1515 1661
1662 /* Basic sanity checks */
1663 if (dev->phy.address < 0)
1664 return -EOPNOTSUPP;
1516 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) 1665 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1517 return -EINVAL; 1666 return -EINVAL;
1518 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) 1667 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1519 return -EINVAL; 1668 return -EINVAL;
1520 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) 1669 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1521 return -EINVAL; 1670 return -EINVAL;
1522 if (cmd->autoneg == AUTONEG_DISABLE) 1671
1672 if (cmd->autoneg == AUTONEG_DISABLE) {
1523 switch (cmd->speed) { 1673 switch (cmd->speed) {
1524 case SPEED_10: 1674 case SPEED_10:
1525 if (cmd->duplex == DUPLEX_HALF && 1675 if (cmd->duplex == DUPLEX_HALF
1526 (features & SUPPORTED_10baseT_Half) == 0) 1676 && !(f & SUPPORTED_10baseT_Half))
1527 return -EINVAL; 1677 return -EINVAL;
1528 if (cmd->duplex == DUPLEX_FULL && 1678 if (cmd->duplex == DUPLEX_FULL
1529 (features & SUPPORTED_10baseT_Full) == 0) 1679 && !(f & SUPPORTED_10baseT_Full))
1530 return -EINVAL; 1680 return -EINVAL;
1531 break; 1681 break;
1532 case SPEED_100: 1682 case SPEED_100:
1533 if (cmd->duplex == DUPLEX_HALF && 1683 if (cmd->duplex == DUPLEX_HALF
1534 (features & SUPPORTED_100baseT_Half) == 0) 1684 && !(f & SUPPORTED_100baseT_Half))
1535 return -EINVAL; 1685 return -EINVAL;
1536 if (cmd->duplex == DUPLEX_FULL && 1686 if (cmd->duplex == DUPLEX_FULL
1537 (features & SUPPORTED_100baseT_Full) == 0) 1687 && !(f & SUPPORTED_100baseT_Full))
1538 return -EINVAL; 1688 return -EINVAL;
1539 break; 1689 break;
1540 case SPEED_1000: 1690 case SPEED_1000:
1541 if (cmd->duplex == DUPLEX_HALF && 1691 if (cmd->duplex == DUPLEX_HALF
1542 (features & SUPPORTED_1000baseT_Half) == 0) 1692 && !(f & SUPPORTED_1000baseT_Half))
1543 return -EINVAL; 1693 return -EINVAL;
1544 if (cmd->duplex == DUPLEX_FULL && 1694 if (cmd->duplex == DUPLEX_FULL
1545 (features & SUPPORTED_1000baseT_Full) == 0) 1695 && !(f & SUPPORTED_1000baseT_Full))
1546 return -EINVAL; 1696 return -EINVAL;
1547 break; 1697 break;
1548 default: 1698 default:
1549 return -EINVAL; 1699 return -EINVAL;
1550 } else if ((features & SUPPORTED_Autoneg) == 0) 1700 }
1551 return -EINVAL; 1701
1552 spin_lock_irq(&fep->lock); 1702 local_bh_disable();
1553 emac_start_link(fep, cmd); 1703 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1554 spin_unlock_irq(&fep->lock); 1704 cmd->duplex);
1705
1706 } else {
1707 if (!(f & SUPPORTED_Autoneg))
1708 return -EINVAL;
1709
1710 local_bh_disable();
1711 dev->phy.def->ops->setup_aneg(&dev->phy,
1712 (cmd->advertising & f) |
1713 (dev->phy.advertising &
1714 (ADVERTISED_Pause |
1715 ADVERTISED_Asym_Pause)));
1716 }
1717 emac_force_link_update(dev);
1718 local_bh_enable();
1719
1555 return 0; 1720 return 0;
1556} 1721}
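
The forced-mode validation in emac_ethtool_set_settings() above is a three-way switch over speed crossed with duplex. The same check can be written table-driven; a sketch with illustrative SUPPORTED_* values (the real flags come from <linux/ethtool.h>):

	#include <stdint.h>

	/* Illustrative SUPPORTED_* bit values, not the kernel's. */
	#define SUP_10_HALF   (1u << 0)
	#define SUP_10_FULL   (1u << 1)
	#define SUP_100_HALF  (1u << 2)
	#define SUP_100_FULL  (1u << 3)
	#define SUP_1000_HALF (1u << 4)
	#define SUP_1000_FULL (1u << 5)

	/* Collapse the speed/duplex switch into a lookup:
	 * row = speed index (0: 10, 1: 100, 2: 1000), column = duplex. */
	static int mode_supported(uint32_t features, int speed_idx, int full)
	{
		static const uint32_t need[3][2] = {
			{ SUP_10_HALF,   SUP_10_FULL },
			{ SUP_100_HALF,  SUP_100_FULL },
			{ SUP_1000_HALF, SUP_1000_FULL },
		};

		if (speed_idx < 0 || speed_idx > 2)
			return 0;
		return !!(features & need[speed_idx][full]);
	}

Whether the switch or the table reads better is a style call; the table makes it harder to forget a speed/duplex combination.
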
1557 1722
1558static void 1723static void emac_ethtool_get_ringparam(struct net_device *ndev,
1559emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) 1724 struct ethtool_ringparam *rp)
1560{ 1725{
1561 struct ocp_enet_private *fep = ndev->priv; 1726 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1562 1727 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1563 strcpy(info->driver, DRV_NAME);
1564 strcpy(info->version, DRV_VERSION);
1565 info->fw_version[0] = '\0';
1566 sprintf(info->bus_info, "IBM EMAC %d", fep->ocpdev->def->index);
1567 info->regdump_len = 0;
1568} 1728}
1569 1729
1570static int emac_nway_reset(struct net_device *ndev) 1730static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1731 struct ethtool_pauseparam *pp)
1571{ 1732{
1572 struct ocp_enet_private *fep = ndev->priv; 1733 struct ocp_enet_private *dev = ndev->priv;
1734
1735 local_bh_disable();
1736 if ((dev->phy.features & SUPPORTED_Autoneg) &&
1737 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1738 pp->autoneg = 1;
1739
1740 if (dev->phy.duplex == DUPLEX_FULL) {
1741 if (dev->phy.pause)
1742 pp->rx_pause = pp->tx_pause = 1;
1743 else if (dev->phy.asym_pause)
1744 pp->tx_pause = 1;
1745 }
1746 local_bh_enable();
1747}
1573 1748
1574 if (!fep->want_autoneg) 1749static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1575 return -EINVAL; 1750{
1576 spin_lock_irq(&fep->lock); 1751 struct ocp_enet_private *dev = ndev->priv;
1577 emac_start_link(fep, NULL); 1752 return dev->tah_dev != 0;
1578 spin_unlock_irq(&fep->lock);
1579 return 0;
1580} 1753}
1581 1754
1582static u32 emac_get_link(struct net_device *ndev) 1755static int emac_get_regs_len(struct ocp_enet_private *dev)
1583{ 1756{
1584 return netif_carrier_ok(ndev); 1757 return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1585} 1758}
1586 1759
1587static struct ethtool_ops emac_ethtool_ops = { 1760static int emac_ethtool_get_regs_len(struct net_device *ndev)
1588 .get_settings = emac_get_settings, 1761{
1589 .set_settings = emac_set_settings, 1762 struct ocp_enet_private *dev = ndev->priv;
1590 .get_drvinfo = emac_get_drvinfo, 1763 return sizeof(struct emac_ethtool_regs_hdr) +
1591 .nway_reset = emac_nway_reset, 1764 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1592 .get_link = emac_get_link 1765 zmii_get_regs_len(dev->zmii_dev) +
1593}; 1766 rgmii_get_regs_len(dev->rgmii_dev) +
1767 tah_get_regs_len(dev->tah_dev);
1768}
1594 1769
1595static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1770static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1596{ 1771{
1597 struct ocp_enet_private *fep = dev->priv; 1772 struct emac_ethtool_regs_subhdr *hdr = buf;
1598 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1599 1773
1600 switch (cmd) { 1774 hdr->version = EMAC_ETHTOOL_REGS_VER;
1601 case SIOCGMIIPHY: 1775 hdr->index = dev->def->index;
1602 data[0] = fep->mii_phy_addr; 1776 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1603 /* Fall through */ 1777 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1604 case SIOCGMIIREG: 1778}
1605 data[3] = emac_phy_read(dev, fep->mii_phy_addr, data[1]);
1606 return 0;
1607 case SIOCSMIIREG:
1608 if (!capable(CAP_NET_ADMIN))
1609 return -EPERM;
1610 1779
1611 emac_phy_write(dev, fep->mii_phy_addr, data[1], data[2]); 1780static void emac_ethtool_get_regs(struct net_device *ndev,
1612 return 0; 1781 struct ethtool_regs *regs, void *buf)
1613 default: 1782{
1614 return -EOPNOTSUPP; 1783 struct ocp_enet_private *dev = ndev->priv;
1784 struct emac_ethtool_regs_hdr *hdr = buf;
1785
1786 hdr->components = 0;
1787 buf = hdr + 1;
1788
1789 local_irq_disable();
1790 buf = mal_dump_regs(dev->mal, buf);
1791 buf = emac_dump_regs(dev, buf);
1792 if (dev->zmii_dev) {
1793 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
1794 buf = zmii_dump_regs(dev->zmii_dev, buf);
1615 } 1795 }
1796 if (dev->rgmii_dev) {
1797 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
1798 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
1799 }
1800 if (dev->tah_dev) {
1801 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
1802 buf = tah_dump_regs(dev->tah_dev, buf);
1803 }
1804 local_irq_enable();
1616} 1805}
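
The regs dump above is a tagged container: a fixed header whose components bitmask announces which optional sub-blocks (ZMII, RGMII, TAH) follow the mandatory MAL and EMAC blocks. A sketch of writing such a container; field and flag names below are illustrative, not the driver's:

	#include <stdint.h>
	#include <string.h>

	#define COMP_ZMII  0x1		/* illustrative flag values */
	#define COMP_RGMII 0x2

	struct dump_hdr { uint32_t components; };

	static void *put_block(void *buf, const void *src, size_t len)
	{
		memcpy(buf, src, len);
		return (char *)buf + len;	/* advance the cursor */
	}

	/* Caller provides a buffer big enough for all selected blocks */
	static size_t dump_regs(void *buf, const void *zmii, size_t zlen)
	{
		struct dump_hdr *hdr = buf;
		void *p = hdr + 1;

		hdr->components = 0;
		if (zmii) {
			hdr->components |= COMP_ZMII;
			p = put_block(p, zmii, zlen);
		}
		return (char *)p - (char *)buf;
	}
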
1617 1806
1618static int emac_open(struct net_device *dev) 1807static int emac_ethtool_nway_reset(struct net_device *ndev)
1619{ 1808{
1620 struct ocp_enet_private *fep = dev->priv; 1809 struct ocp_enet_private *dev = ndev->priv;
1621 int rc; 1810 int res = 0;
1622 1811
1623 spin_lock_irq(&fep->lock); 1812 DBG("%d: nway_reset" NL, dev->def->index);
1624 1813
1625 fep->opened = 1; 1814 if (dev->phy.address < 0)
1626 netif_carrier_off(dev); 1815 return -EOPNOTSUPP;
1627 1816
1628 /* Reset & configure the chip */ 1817 local_bh_disable();
1629 emac_reset_configure(fep); 1818 if (!dev->phy.autoneg) {
1819 res = -EINVAL;
1820 goto out;
1821 }
1630 1822
1631 spin_unlock_irq(&fep->lock); 1823 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1824 emac_force_link_update(dev);
1632 1825
1633 /* Request our interrupt lines */ 1826 out:
1634 rc = request_irq(dev->irq, emac_mac_irq, 0, "IBM EMAC MAC", dev); 1827 local_bh_enable();
1635 if (rc != 0) { 1828 return res;
1636 printk("dev->irq %d failed\n", dev->irq); 1829}
1637 goto bail;
1638 }
1639 /* Kick the chip rx & tx channels into life */
1640 spin_lock_irq(&fep->lock);
1641 emac_kick(fep);
1642 spin_unlock_irq(&fep->lock);
1643 1830
1644 netif_start_queue(dev); 1831static int emac_ethtool_get_stats_count(struct net_device *ndev)
1645 bail: 1832{
1646 return rc; 1833 return EMAC_ETHTOOL_STATS_COUNT;
1647} 1834}
1648 1835
1649static int emac_close(struct net_device *dev) 1836static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1837 u8 * buf)
1650{ 1838{
1651 struct ocp_enet_private *fep = dev->priv; 1839 if (stringset == ETH_SS_STATS)
1652 emac_t *emacp = fep->emacp; 1840 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1841}
1653 1842
1654 /* XXX Stop IRQ emitting here */ 1843static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
1655 spin_lock_irq(&fep->lock); 1844 struct ethtool_stats *estats,
1656 fep->opened = 0; 1845 u64 * tmp_stats)
1657 mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask); 1846{
1658 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 1847 struct ocp_enet_private *dev = ndev->priv;
1659 netif_carrier_off(dev); 1848 local_irq_disable();
1660 netif_stop_queue(dev); 1849 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
1850 tmp_stats += sizeof(dev->stats) / sizeof(u64);
1851 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
1852 local_irq_enable();
1853}
1661 1854
1662 /* 1855static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1663 * Check for a link, some PHYs don't provide a clock if 1856 struct ethtool_drvinfo *info)
1664 * no link is present. Some EMACs will not come out of 1857{
1665 * soft reset without a PHY clock present. 1858 struct ocp_enet_private *dev = ndev->priv;
1666 */
1667 if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
1668 out_be32(&emacp->em0mr0, EMAC_M0_SRST);
1669 udelay(10);
1670 1859
1671 if (emacp->em0mr0 & EMAC_M0_SRST) { 1860 strcpy(info->driver, "ibm_emac");
1672 /* not sure what to do here; hopefully it clears before another open */ 1861 strcpy(info->version, DRV_VERSION);
1673 printk(KERN_ERR 1862 info->fw_version[0] = '\0';
1674 "%s: Phy SoftReset didn't clear, no link?\n", 1863 sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1675 dev->name); 1864 info->n_stats = emac_ethtool_get_stats_count(ndev);
1676 } 1865 info->regdump_len = emac_ethtool_get_regs_len(ndev);
1677 } 1866}
1678 1867
1679 /* Free the irq's */ 1868static struct ethtool_ops emac_ethtool_ops = {
1680 free_irq(dev->irq, dev); 1869 .get_settings = emac_ethtool_get_settings,
1870 .set_settings = emac_ethtool_set_settings,
1871 .get_drvinfo = emac_ethtool_get_drvinfo,
1681 1872
1682 spin_unlock_irq(&fep->lock); 1873 .get_regs_len = emac_ethtool_get_regs_len,
1874 .get_regs = emac_ethtool_get_regs,
1683 1875
1684 return 0; 1876 .nway_reset = emac_ethtool_nway_reset,
1685}
1686 1877
1687static void emac_remove(struct ocp_device *ocpdev) 1878 .get_ringparam = emac_ethtool_get_ringparam,
1688{ 1879 .get_pauseparam = emac_ethtool_get_pauseparam,
1689 struct net_device *dev = ocp_get_drvdata(ocpdev); 1880
1690 struct ocp_enet_private *ep = dev->priv; 1881 .get_rx_csum = emac_ethtool_get_rx_csum,
1691 1882
1692 /* FIXME: locking, races, ... */ 1883 .get_strings = emac_ethtool_get_strings,
1693 ep->going_away = 1; 1884 .get_stats_count = emac_ethtool_get_stats_count,
1694 ocp_set_drvdata(ocpdev, NULL); 1885 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
1695 if (ep->rgmii_dev) 1886
1696 emac_close_rgmii(ep->rgmii_dev); 1887 .get_link = ethtool_op_get_link,
1697 if (ep->zmii_dev) 1888 .get_tx_csum = ethtool_op_get_tx_csum,
1698 emac_close_zmii(ep->zmii_dev); 1889 .get_sg = ethtool_op_get_sg,
1699
1700 unregister_netdev(dev);
1701 del_timer_sync(&ep->link_timer);
1702 mal_unregister_commac(ep->mal, &ep->commac);
1703 iounmap((void *)ep->emacp);
1704 kfree(dev);
1705}
1706
1707struct mal_commac_ops emac_commac_ops = {
1708 .txeob = &emac_txeob_dev,
1709 .txde = &emac_txde_dev,
1710 .rxeob = &emac_rxeob_dev,
1711 .rxde = &emac_rxde_dev,
1712}; 1890};
1713 1891
1714#ifdef CONFIG_NET_POLL_CONTROLLER 1892static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1715static void emac_netpoll(struct net_device *ndev)
1716{ 1893{
1717 emac_rxeob_dev((void *)ndev, 0); 1894 struct ocp_enet_private *dev = ndev->priv;
1718 emac_txeob_dev((void *)ndev, 0); 1895 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1896
1897 DBG("%d: ioctl %08x" NL, dev->def->index, cmd);
1898
1899 if (dev->phy.address < 0)
1900 return -EOPNOTSUPP;
1901
1902 switch (cmd) {
1903 case SIOCGMIIPHY:
1904 case SIOCDEVPRIVATE:
1905 data[0] = dev->phy.address;
1906 /* Fall through */
1907 case SIOCGMIIREG:
1908 case SIOCDEVPRIVATE + 1:
1909 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
1910 return 0;
1911
1912 case SIOCSMIIREG:
1913 case SIOCDEVPRIVATE + 2:
1914 if (!capable(CAP_NET_ADMIN))
1915 return -EPERM;
1916 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
1917 return 0;
1918 default:
1919 return -EOPNOTSUPP;
1920 }
1719} 1921}
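
The new emac_ioctl() above implements the standard MII ioctls, indexing ifr_ifru as an array of u16s: data[0] is the PHY address, data[1] the register number, data[3] the value read back. From user space that layout is the kernel's struct mii_ioctl_data; a usage sketch (the interface name eth0 is assumed, and the struct is declared locally to keep the example self-contained):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>	/* SIOCGMIIPHY, SIOCGMIIREG */

	/* Mirrors the kernel's struct mii_ioctl_data: four u16s at the
	 * start of ifr_ifru, exactly what emac_ioctl() above indexes as
	 * data[0]..data[3]. */
	struct mii_data {
		unsigned short phy_id;
		unsigned short reg_num;
		unsigned short val_in;
		unsigned short val_out;
	};

	int main(void)
	{
		struct ifreq ifr;
		struct mii_data *mii = (struct mii_data *)&ifr.ifr_ifru;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */

		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills phy_id */
			mii->reg_num = 1;			/* MII_BMSR */
			if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
				printf("BMSR = 0x%04x\n", mii->val_out);
		}
		close(fd);
		return 0;
	}
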
1720#endif
1721 1922
1722static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal) 1923static int __init emac_probe(struct ocp_device *ocpdev)
1723{ 1924{
1724 int deferred_init = 0; 1925 struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1725 int rc = 0, i;
1726 struct net_device *ndev; 1926 struct net_device *ndev;
1727 struct ocp_enet_private *ep; 1927 struct ocp_device *maldev;
1728 struct ocp_func_emac_data *emacdata; 1928 struct ocp_enet_private *dev;
1729 int commac_reg = 0; 1929 int err, i;
1730 u32 phy_map; 1930
1931 DBG("%d: probe" NL, ocpdev->def->index);
1731 1932
1732 emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
1733 if (!emacdata) { 1933 if (!emacdata) {
1734 printk(KERN_ERR "emac%d: Missing additional data!\n", 1934 printk(KERN_ERR "emac%d: Missing additional data!\n",
1735 ocpdev->def->index); 1935 ocpdev->def->index);
@@ -1738,304 +1938,312 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
1738 1938
1739 /* Allocate our net_device structure */ 1939 /* Allocate our net_device structure */
1740 ndev = alloc_etherdev(sizeof(struct ocp_enet_private)); 1940 ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1741 if (ndev == NULL) { 1941 if (!ndev) {
1742 printk(KERN_ERR 1942 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1743 "emac%d: Could not allocate ethernet device.\n",
1744 ocpdev->def->index); 1943 ocpdev->def->index);
1745 return -ENOMEM; 1944 return -ENOMEM;
1746 } 1945 }
1747 ep = ndev->priv; 1946 dev = ndev->priv;
1748 ep->ndev = ndev; 1947 dev->ndev = ndev;
1749 ep->ocpdev = ocpdev; 1948 dev->ldev = &ocpdev->dev;
1750 ndev->irq = ocpdev->def->irq; 1949 dev->def = ocpdev->def;
1751 ep->wol_irq = emacdata->wol_irq; 1950 SET_MODULE_OWNER(ndev);
1752 if (emacdata->mdio_idx >= 0) {
1753 if (emacdata->mdio_idx == ocpdev->def->index) {
1754 /* Set the common MDIO net_device */
1755 mdio_ndev = ndev;
1756 deferred_init = 1;
1757 }
1758 ep->mdio_dev = mdio_ndev;
1759 } else {
1760 ep->mdio_dev = ndev;
1761 }
1762 1951
1763 ocp_set_drvdata(ocpdev, ndev); 1952 /* Find MAL device we are connected to */
1764 1953 maldev =
1765 spin_lock_init(&ep->lock); 1954 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1766 1955 if (!maldev) {
1767 /* Fill out MAL informations and register commac */ 1956 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1768 ep->mal = mal; 1957 dev->def->index, emacdata->mal_idx);
1769 ep->mal_tx_chan = emacdata->mal_tx_chan; 1958 err = -ENODEV;
1770 ep->mal_rx_chan = emacdata->mal_rx_chan; 1959 goto out;
1771 ep->commac.ops = &emac_commac_ops; 1960 }
1772 ep->commac.dev = ndev; 1961 dev->mal = ocp_get_drvdata(maldev);
1773 ep->commac.tx_chan_mask = MAL_CHAN_MASK(ep->mal_tx_chan); 1962 if (!dev->mal) {
1774 ep->commac.rx_chan_mask = MAL_CHAN_MASK(ep->mal_rx_chan); 1963 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1775 rc = mal_register_commac(ep->mal, &ep->commac); 1964 dev->def->index, emacdata->mal_idx);
1776 if (rc != 0) 1965 err = -ENODEV;
1777 goto bail; 1966 goto out;
1778 commac_reg = 1;
1779
1780 /* Map our MMIOs */
1781 ep->emacp = (emac_t *) ioremap(ocpdev->def->paddr, sizeof(emac_t));
1782
1783 /* Check if we need to attach to a ZMII */
1784 if (emacdata->zmii_idx >= 0) {
1785 ep->zmii_input = emacdata->zmii_mux;
1786 ep->zmii_dev =
1787 ocp_find_device(OCP_ANY_ID, OCP_FUNC_ZMII,
1788 emacdata->zmii_idx);
1789 if (ep->zmii_dev == NULL)
1790 printk(KERN_WARNING
1791 "emac%d: ZMII %d requested but not found !\n",
1792 ocpdev->def->index, emacdata->zmii_idx);
1793 else if ((rc =
1794 emac_init_zmii(ep->zmii_dev, ep->zmii_input,
1795 emacdata->phy_mode)) != 0)
1796 goto bail;
1797 } 1967 }
1798 1968
1799 /* Check if we need to attach to a RGMII */ 1969 /* Register with MAL */
1800 if (emacdata->rgmii_idx >= 0) { 1970 dev->commac.ops = &emac_commac_ops;
1801 ep->rgmii_input = emacdata->rgmii_mux; 1971 dev->commac.dev = dev;
1802 ep->rgmii_dev = 1972 dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1803 ocp_find_device(OCP_ANY_ID, OCP_FUNC_RGMII, 1973 dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1804 emacdata->rgmii_idx); 1974 err = mal_register_commac(dev->mal, &dev->commac);
1805 if (ep->rgmii_dev == NULL) 1975 if (err) {
1806 printk(KERN_WARNING 1976 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1807 "emac%d: RGMII %d requested but not found !\n", 1977 dev->def->index, emacdata->mal_idx);
1808 ocpdev->def->index, emacdata->rgmii_idx); 1978 goto out;
1809 else if ((rc = 1979 }
1810 emac_init_rgmii(ep->rgmii_dev, ep->rgmii_input, 1980 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1811 emacdata->phy_mode)) != 0) 1981 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
1812 goto bail; 1982
1983 /* Get pointers to BD rings */
1984 dev->tx_desc =
1985 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
1986 emacdata->mal_tx_chan);
1987 dev->rx_desc =
1988 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
1989 emacdata->mal_rx_chan);
1990
1991 DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
1992 DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
1993
1994 /* Clean rings */
1995 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
1996 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
1997
1998 /* If we depend on another EMAC for MDIO, check whether it was probed already */
1999 if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
2000 struct ocp_device *mdiodev =
2001 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2002 emacdata->mdio_idx);
2003 if (!mdiodev) {
2004 printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2005 dev->def->index, emacdata->mdio_idx);
2006 err = -ENODEV;
2007 goto out2;
2008 }
2009 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2010 if (!dev->mdio_dev) {
2011 printk(KERN_ERR
2012 "emac%d: emac%d hasn't been initialized yet!\n",
2013 dev->def->index, emacdata->mdio_idx);
2014 err = -ENODEV;
2015 goto out2;
2016 }
1813 } 2017 }
1814 2018
1815 /* Check if we need to attach to a TAH */ 2019 /* Attach to ZMII, if needed */
1816 if (emacdata->tah_idx >= 0) { 2020 if ((err = zmii_attach(dev)) != 0)
1817 ep->tah_dev = 2021 goto out2;
1818 ocp_find_device(OCP_ANY_ID, OCP_FUNC_TAH, 2022
1819 emacdata->tah_idx); 2023 /* Attach to RGMII, if needed */
1820 if (ep->tah_dev == NULL) 2024 if ((err = rgmii_attach(dev)) != 0)
1821 printk(KERN_WARNING 2025 goto out3;
1822 "emac%d: TAH %d requested but not found !\n", 2026
1823 ocpdev->def->index, emacdata->tah_idx); 2027 /* Attach to TAH, if needed */
1824 else if ((rc = emac_init_tah(ep)) != 0) 2028 if ((err = tah_attach(dev)) != 0)
1825 goto bail; 2029 goto out4;
2030
2031 /* Map EMAC regs */
2032 dev->emacp =
2033 (struct emac_regs *)ioremap(dev->def->paddr,
2034 sizeof(struct emac_regs));
2035 if (!dev->emacp) {
2036 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2037 dev->def->index);
2038 err = -ENOMEM;
2039 goto out5;
1826 } 2040 }
1827 2041
1828 if (deferred_init) { 2042 /* Fill in MAC address */
1829 if (!list_empty(&emac_init_list)) { 2043 for (i = 0; i < 6; ++i)
1830 struct list_head *entry; 2044 ndev->dev_addr[i] = emacdata->mac_addr[i];
1831 struct emac_def_dev *ddev;
1832 2045
1833 list_for_each(entry, &emac_init_list) { 2046 /* Set some link defaults before we can find out real parameters */
1834 ddev = 2047 dev->phy.speed = SPEED_100;
1835 list_entry(entry, struct emac_def_dev, 2048 dev->phy.duplex = DUPLEX_FULL;
1836 link); 2049 dev->phy.autoneg = AUTONEG_DISABLE;
1837 emac_init_device(ddev->ocpdev, ddev->mal); 2050 dev->phy.pause = dev->phy.asym_pause = 0;
1838 } 2051 init_timer(&dev->link_timer);
2052 dev->link_timer.function = emac_link_timer;
2053 dev->link_timer.data = (unsigned long)dev;
2054
2055 /* Find PHY if any */
2056 dev->phy.dev = ndev;
2057 dev->phy.mode = emacdata->phy_mode;
2058 if (emacdata->phy_map != 0xffffffff) {
2059 u32 phy_map = emacdata->phy_map | busy_phy_map;
2060 u32 adv;
2061
2062 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2063 emacdata->phy_map, busy_phy_map);
2064
2065 EMAC_RX_CLK_TX(dev->def->index);
2066
2067 dev->phy.mdio_read = emac_mdio_read;
2068 dev->phy.mdio_write = emac_mdio_write;
2069
2070 /* Configure EMAC with defaults so we can at least use MDIO
2071 * This is needed mostly for 440GX
2072 */
2073 if (emac_phy_gpcs(dev->phy.mode)) {
2074 /* XXX
2075 * Make GPCS PHY address equal to EMAC index.
2076 * We probably should take into account busy_phy_map
2077 * and/or phy_map here.
2078 */
2079 dev->phy.address = dev->def->index;
1839 } 2080 }
1840 } 2081
2082 emac_configure(dev);
1841 2083
1842 /* Init link monitoring timer */ 2084 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
1843 init_timer(&ep->link_timer); 2085 if (!(phy_map & 1)) {
1844 ep->link_timer.function = emac_link_timer; 2086 int r;
1845 ep->link_timer.data = (unsigned long)ep; 2087 busy_phy_map |= 1 << i;
1846 ep->timer_ticks = 0; 2088
1847 2089 /* Quick check if there is a PHY at the address */
1848 /* Fill up the mii_phy structure */ 2090 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
1849 ep->phy_mii.dev = ndev; 2091 if (r == 0xffff || r < 0)
1850 ep->phy_mii.mdio_read = emac_phy_read; 2092 continue;
1851 ep->phy_mii.mdio_write = emac_phy_write; 2093 if (!mii_phy_probe(&dev->phy, i))
1852 ep->phy_mii.mode = emacdata->phy_mode; 2094 break;
1853 2095 }
1854 /* Find PHY */ 2096 if (i == 0x20) {
1855 phy_map = emacdata->phy_map | busy_phy_map; 2097 printk(KERN_WARNING "emac%d: can't find PHY!\n",
1856 for (i = 0; i <= 0x1f; i++, phy_map >>= 1) { 2098 dev->def->index);
1857 if ((phy_map & 0x1) == 0) { 2099 goto out6;
1858 int val = emac_phy_read(ndev, i, MII_BMCR);
1859 if (val != 0xffff && val != -1)
1860 break;
1861 } 2100 }
1862 }
1863 if (i == 0x20) {
1864 printk(KERN_WARNING "emac%d: Can't find PHY.\n",
1865 ocpdev->def->index);
1866 rc = -ENODEV;
1867 goto bail;
1868 }
1869 busy_phy_map |= 1 << i;
1870 ep->mii_phy_addr = i;
1871 rc = mii_phy_probe(&ep->phy_mii, i);
1872 if (rc) {
1873 printk(KERN_WARNING "emac%d: Failed to probe PHY type.\n",
1874 ocpdev->def->index);
1875 rc = -ENODEV;
1876 goto bail;
1877 }
1878
1879 /* Disable any PHY features not supported by the platform */
1880 ep->phy_mii.def->features &= ~emacdata->phy_feat_exc;
1881 2101
1882 /* Setup initial PHY config & startup aneg */ 2102 /* Init PHY */
1883 if (ep->phy_mii.def->ops->init) 2103 if (dev->phy.def->ops->init)
1884 ep->phy_mii.def->ops->init(&ep->phy_mii); 2104 dev->phy.def->ops->init(&dev->phy);
1885 netif_carrier_off(ndev);
1886 if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
1887 ep->want_autoneg = 1;
1888 else {
1889 ep->want_autoneg = 0;
1890 2105
1891 /* Select highest supported speed/duplex */ 2106 /* Disable any PHY features not supported by the platform */
1892 if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) { 2107 dev->phy.def->features &= ~emacdata->phy_feat_exc;
1893 ep->phy_mii.speed = SPEED_1000; 2108
1894 ep->phy_mii.duplex = DUPLEX_FULL; 2109 /* Setup initial link parameters */
1895 } else if (ep->phy_mii.def->features & 2110 if (dev->phy.features & SUPPORTED_Autoneg) {
1896 SUPPORTED_1000baseT_Half) { 2111 adv = dev->phy.features;
1897 ep->phy_mii.speed = SPEED_1000; 2112#if !defined(CONFIG_40x)
1898 ep->phy_mii.duplex = DUPLEX_HALF; 2113 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1899 } else if (ep->phy_mii.def->features & 2114#endif
1900 SUPPORTED_100baseT_Full) { 2115 /* Restart autonegotiation */
1901 ep->phy_mii.speed = SPEED_100; 2116 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
1902 ep->phy_mii.duplex = DUPLEX_FULL;
1903 } else if (ep->phy_mii.def->features &
1904 SUPPORTED_100baseT_Half) {
1905 ep->phy_mii.speed = SPEED_100;
1906 ep->phy_mii.duplex = DUPLEX_HALF;
1907 } else if (ep->phy_mii.def->features &
1908 SUPPORTED_10baseT_Full) {
1909 ep->phy_mii.speed = SPEED_10;
1910 ep->phy_mii.duplex = DUPLEX_FULL;
1911 } else { 2117 } else {
1912 ep->phy_mii.speed = SPEED_10; 2118 u32 f = dev->phy.def->features;
1913 ep->phy_mii.duplex = DUPLEX_HALF; 2119 int speed = SPEED_10, fd = DUPLEX_HALF;
2120
2121 /* Select highest supported speed/duplex */
2122 if (f & SUPPORTED_1000baseT_Full) {
2123 speed = SPEED_1000;
2124 fd = DUPLEX_FULL;
2125 } else if (f & SUPPORTED_1000baseT_Half)
2126 speed = SPEED_1000;
2127 else if (f & SUPPORTED_100baseT_Full) {
2128 speed = SPEED_100;
2129 fd = DUPLEX_FULL;
2130 } else if (f & SUPPORTED_100baseT_Half)
2131 speed = SPEED_100;
2132 else if (f & SUPPORTED_10baseT_Full)
2133 fd = DUPLEX_FULL;
2134
2135 /* Force link parameters */
2136 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
1914 } 2137 }
1915 } 2138 } else {
1916 emac_start_link(ep, NULL); 2139 emac_reset(dev);
1917 2140
1918 /* read the MAC Address */ 2141 /* PHY-less configuration.
1919 for (i = 0; i < 6; i++) 2142 * XXX I probably should move these settings to emacdata
1920 ndev->dev_addr[i] = emacdata->mac_addr[i]; 2143 */
2144 dev->phy.address = -1;
2145 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2146 dev->phy.pause = 1;
2147 }
1921 2148
1922 /* Fill in the driver function table */ 2149 /* Fill in the driver function table */
1923 ndev->open = &emac_open; 2150 ndev->open = &emac_open;
1924 ndev->hard_start_xmit = &emac_start_xmit; 2151 if (dev->tah_dev) {
2152 ndev->hard_start_xmit = &emac_start_xmit_sg;
2153 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2154 } else
2155 ndev->hard_start_xmit = &emac_start_xmit;
2156 ndev->tx_timeout = &emac_full_tx_reset;
2157 ndev->watchdog_timeo = 5 * HZ;
1925 ndev->stop = &emac_close; 2158 ndev->stop = &emac_close;
1926 ndev->get_stats = &emac_stats; 2159 ndev->get_stats = &emac_stats;
1927 if (emacdata->jumbo)
1928 ndev->change_mtu = &emac_change_mtu;
1929 ndev->set_mac_address = &emac_set_mac_address;
1930 ndev->set_multicast_list = &emac_set_multicast_list; 2160 ndev->set_multicast_list = &emac_set_multicast_list;
1931 ndev->do_ioctl = &emac_ioctl; 2161 ndev->do_ioctl = &emac_ioctl;
2162 if (emac_phy_supports_gige(emacdata->phy_mode)) {
2163 ndev->change_mtu = &emac_change_mtu;
2164 dev->commac.ops = &emac_commac_sg_ops;
2165 }
1932 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); 2166 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
1933 if (emacdata->tah_idx >= 0)
1934 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
1935#ifdef CONFIG_NET_POLL_CONTROLLER
1936 ndev->poll_controller = emac_netpoll;
1937#endif
1938 2167
1939 SET_MODULE_OWNER(ndev); 2168 netif_carrier_off(ndev);
2169 netif_stop_queue(ndev);
2170
2171 err = register_netdev(ndev);
2172 if (err) {
2173 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2174 dev->def->index, err);
2175 goto out6;
2176 }
1940 2177
1941 rc = register_netdev(ndev); 2178 ocp_set_drvdata(ocpdev, dev);
1942 if (rc != 0)
1943 goto bail;
1944 2179
1945 printk("%s: IBM emac, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", 2180 printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
1946 ndev->name, 2181 ndev->name, dev->def->index,
1947 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], 2182 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
1948 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); 2183 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
1949 printk(KERN_INFO "%s: Found %s PHY (0x%02x)\n",
1950 ndev->name, ep->phy_mii.def->name, ep->mii_phy_addr);
1951 2184
1952 bail: 2185 if (dev->phy.address >= 0)
1953 if (rc && commac_reg) 2186 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
1954 mal_unregister_commac(ep->mal, &ep->commac); 2187 dev->phy.def->name, dev->phy.address);
1955 if (rc && ndev)
1956 kfree(ndev);
1957 2188
1958 return rc; 2189 emac_dbg_register(dev->def->index, dev);
1959}
1960
1961static int emac_probe(struct ocp_device *ocpdev)
1962{
1963 struct ocp_device *maldev;
1964 struct ibm_ocp_mal *mal;
1965 struct ocp_func_emac_data *emacdata;
1966
1967 emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
1968 if (emacdata == NULL) {
1969		printk(KERN_ERR "emac%d: Missing additional data!\n",
1970 ocpdev->def->index);
1971 return -ENODEV;
1972 }
1973
1974 /* Get the MAL device */
1975 maldev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_MAL, emacdata->mal_idx);
1976 if (maldev == NULL) {
1977 printk("No maldev\n");
1978 return -ENODEV;
1979 }
1980 /*
1981 * Get MAL driver data, it must be here due to link order.
1982 * When the driver is modularized, symbol dependencies will
1983 * ensure the MAL driver is already present if built as a
1984 * module.
1985 */
1986 mal = (struct ibm_ocp_mal *)ocp_get_drvdata(maldev);
1987 if (mal == NULL) {
1988 printk("No maldrv\n");
1989 return -ENODEV;
1990 }
1991
1992 /* If we depend on another EMAC for MDIO, wait for it to show up */
1993 if (emacdata->mdio_idx >= 0 &&
1994 (emacdata->mdio_idx != ocpdev->def->index) && !mdio_ndev) {
1995 struct emac_def_dev *ddev;
1996 /* Add this index to the deferred init table */
1997 ddev = kmalloc(sizeof(struct emac_def_dev), GFP_KERNEL);
1998 ddev->ocpdev = ocpdev;
1999 ddev->mal = mal;
2000 list_add_tail(&ddev->link, &emac_init_list);
2001 } else {
2002 emac_init_device(ocpdev, mal);
2003 }
2004 2190
2005 return 0; 2191 return 0;
2192 out6:
2193 iounmap((void *)dev->emacp);
2194 out5:
2195 tah_fini(dev->tah_dev);
2196 out4:
2197 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2198 out3:
2199 zmii_fini(dev->zmii_dev, dev->zmii_input);
2200 out2:
2201 mal_unregister_commac(dev->mal, &dev->commac);
2202 out:
2203 kfree(ndev);
2204 return err;
2006} 2205}
2007 2206
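As a reading aid for the error path above: the unwind labels release resources in exact reverse order of acquisition, each label falling through to the next (a summary of the code above, not additional code):

	/* out6: iounmap(dev->emacp)             undoes the EMAC regs ioremap
	 * out5: tah_fini(dev->tah_dev)          undoes tah_attach()
	 * out4: rgmii_fini(dev->rgmii_dev, ...) undoes rgmii_attach()
	 * out3: zmii_fini(dev->zmii_dev, ...)   undoes zmii_attach()
	 * out2: mal_unregister_commac(...)      undoes mal_register_commac()
	 * out:  kfree(ndev)                     undoes alloc_etherdev()
	 */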
2008/* Structure for a device driver */
2009static struct ocp_device_id emac_ids[] = { 2207static struct ocp_device_id emac_ids[] = {
2010 {.vendor = OCP_ANY_ID,.function = OCP_FUNC_EMAC}, 2208 { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
2011 {.vendor = OCP_VENDOR_INVALID} 2209 { .vendor = OCP_VENDOR_INVALID}
2012}; 2210};
2013 2211
2014static struct ocp_driver emac_driver = { 2212static struct ocp_driver emac_driver = {
2015 .name = "emac", 2213 .name = "emac",
2016 .id_table = emac_ids, 2214 .id_table = emac_ids,
2017
2018 .probe = emac_probe, 2215 .probe = emac_probe,
2019 .remove = emac_remove, 2216 .remove = emac_remove,
2020}; 2217};
2021 2218
2022static int __init emac_init(void) 2219static int __init emac_init(void)
2023{ 2220{
2024 printk(KERN_INFO DRV_NAME ": " DRV_DESC ", version " DRV_VERSION "\n"); 2221 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2025 printk(KERN_INFO "Maintained by " DRV_AUTHOR "\n"); 2222
2223 DBG(": init" NL);
2026 2224
2027 if (skb_res > 2) { 2225 if (mal_init())
2028 printk(KERN_WARNING "Invalid skb_res: %d, cropping to 2\n", 2226 return -ENODEV;
2029 skb_res); 2227
2030 skb_res = 2; 2228 EMAC_CLK_INTERNAL;
2229 if (ocp_register_driver(&emac_driver)) {
2230 EMAC_CLK_EXTERNAL;
2231 ocp_unregister_driver(&emac_driver);
2232 mal_exit();
2233 return -ENODEV;
2031 } 2234 }
2235 EMAC_CLK_EXTERNAL;
2032 2236
2033 return ocp_register_driver(&emac_driver); 2237 emac_init_debug();
2238 return 0;
2034} 2239}
2035 2240
2036static void __exit emac_exit(void) 2241static void __exit emac_exit(void)
2037{ 2242{
2243 DBG(": exit" NL);
2038 ocp_unregister_driver(&emac_driver); 2244 ocp_unregister_driver(&emac_driver);
2245 mal_exit();
2246 emac_fini_debug();
2039} 2247}
2040 2248
2041module_init(emac_init); 2249module_init(emac_init);
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h
index 97e6e1ea8c89..e9b44d030ac3 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.h
+++ b/drivers/net/ibm_emac/ibm_emac_core.h
@@ -1,146 +1,221 @@
1/* 1/*
2 * ibm_emac_core.h 2 * drivers/net/ibm_emac/ibm_emac_core.h
3 * 3 *
4 * Ethernet driver for the built in ethernet on the IBM 405 PowerPC 4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 * processor.
6 * 5 *
7 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies.
8 * Sept, 2001 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
9 * 8 *
10 * Original driver	 9 * Based on original work by
11 * Johnnie Peters 10 * Armin Kuster <akuster@mvista.com>
12 * jpeters@mvista.com 11 * Johnnie Peters <jpeters@mvista.com>
13 *	12 * Copyright 2000, 2001 MontaVista Software Inc.
14 * Copyright 2000 MontaVista Software Inc.
15 * 13 *
16 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
18 * Free Software Foundation; either version 2 of the License, or (at your 16 * Free Software Foundation; either version 2 of the License, or (at your
19 * option) any later version. 17 * option) any later version.
18 *
20 */ 19 */
20#ifndef __IBM_EMAC_CORE_H_
21#define __IBM_EMAC_CORE_H_
21 22
22#ifndef _IBM_EMAC_CORE_H_ 23#include <linux/config.h>
23#define _IBM_EMAC_CORE_H_
24
25#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/dma-mapping.h>
26#include <asm/ocp.h> 26#include <asm/ocp.h>
27#include <asm/mmu.h> /* For phys_addr_t */
28 27
29#include "ibm_emac.h" 28#include "ibm_emac.h"
30#include "ibm_emac_phy.h" 29#include "ibm_emac_phy.h"
31#include "ibm_emac_rgmii.h"
32#include "ibm_emac_zmii.h" 30#include "ibm_emac_zmii.h"
31#include "ibm_emac_rgmii.h"
33#include "ibm_emac_mal.h" 32#include "ibm_emac_mal.h"
34#include "ibm_emac_tah.h" 33#include "ibm_emac_tah.h"
35 34
36#ifndef CONFIG_IBM_EMAC_TXB 35#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
37#define NUM_TX_BUFF 64 36#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
38#define NUM_RX_BUFF 64
39#else
40#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
41#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
42#endif
43 37
44/* This does 16 byte alignment, exactly what we need. 38/* Simple sanity check */
45 * The packet length includes FCS, but we don't want to 39#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256
46 * include that when passing upstream as it messes up 40#error Invalid number of buffer descriptors (greater than 256)
47 * bridging applications.
48 */
49#ifndef CONFIG_IBM_EMAC_SKBRES
50#define SKB_RES 2
51#else
52#define SKB_RES CONFIG_IBM_EMAC_SKBRES
53#endif 41#endif
54 42
55/* Note about alignment. alloc_skb() returns a cache line	43// XXX
56 * aligned buffer. However, dev_alloc_skb() will add 16 more 44#define EMAC_MIN_MTU 46
57 * bytes and "reserve" them, so our buffer will actually end 45#define EMAC_MAX_MTU 9000
58 * on a half cache line. What we do is to use directly 46
59 * alloc_skb, allocate 16 more bytes to match the total amount 47/* Maximum L2 header length (VLAN tagged, no FCS) */
60 * allocated by dev_alloc_skb(), but we don't reserve. 48#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4)
49
50/* RX BD size for the given MTU */
51static inline int emac_rx_size(int mtu)
52{
53 if (mtu > ETH_DATA_LEN)
54 return MAL_MAX_RX_SIZE;
55 else
56 return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
57}
58
59#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
60
61#define EMAC_RX_SKB_HEADROOM \
62 EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
63
64/* Size of RX skb for the given MTU */
65static inline int emac_rx_skb_size(int mtu)
66{
67 int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
68 return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
69}
70
71/* RX DMA sync size */
72static inline int emac_rx_sync_size(int mtu)
73{
74 return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
75}
76
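Worked out for the default Ethernet MTU, these helpers give the following values (a sketch only: it assumes a 32-byte cache line and that mal_rx_size() rounds up to the MAL's 16-byte buffer granularity, neither of which is defined in this hunk):

	/* EMAC_MTU_OVERHEAD       = 6 * 2 + 2 + 4 = 18  (MAC addresses, type, VLAN tag)
	 * emac_rx_size(1500)      = mal_rx_size(1500 + 18)            = 1520
	 * emac_rx_sync_size(1500) = ALIGN(1520 + 2, 32)               = 1536
	 * emac_rx_skb_size(1500)  = ALIGN(max(1518, 1520) + 2, 32)
	 *                           + EMAC_RX_SKB_HEADROOM            = 1536 + headroom
	 */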
 77/* Driver statistics are split into two parts to make them more cache friendly:
78 * - normal statistics (packet count, etc)
79 * - error statistics
80 *
 81 * When statistics are requested by ethtool, these parts are concatenated;
 82 * the normal part goes first.
83 *
84 * Please, keep these structures in sync with emac_stats_keys.
61 */ 85 */
62#define MAX_NUM_BUF_DESC 255 86
63#define DESC_BUF_SIZE 4080 /* max 4096-16 */ 87/* Normal TX/RX Statistics */
64#define DESC_BUF_SIZE_REG (DESC_BUF_SIZE / 16) 88struct ibm_emac_stats {
65 89 u64 rx_packets;
66/* Transmitter timeout. */ 90 u64 rx_bytes;
67#define TX_TIMEOUT (2*HZ) 91 u64 tx_packets;
68 92 u64 tx_bytes;
69/* MDIO latency delay */ 93 u64 rx_packets_csum;
70#define MDIO_DELAY 250 94 u64 tx_packets_csum;
71 95};
72/* Power managment shift registers */ 96
73#define IBM_CPM_EMMII 0 /* Shift value for MII */ 97/* Error statistics */
74#define IBM_CPM_EMRX 1 /* Shift value for recv */ 98struct ibm_emac_error_stats {
75#define IBM_CPM_EMTX 2 /* Shift value for MAC */ 99 u64 tx_undo;
76#define IBM_CPM_EMAC(x) (((x)>>IBM_CPM_EMMII) | ((x)>>IBM_CPM_EMRX) | ((x)>>IBM_CPM_EMTX)) 100
77 101 /* Software RX Errors */
78#define ENET_HEADER_SIZE 14 102 u64 rx_dropped_stack;
79#define ENET_FCS_SIZE 4 103 u64 rx_dropped_oom;
80#define ENET_DEF_MTU_SIZE 1500 104 u64 rx_dropped_error;
81#define ENET_DEF_BUF_SIZE (ENET_DEF_MTU_SIZE + ENET_HEADER_SIZE + ENET_FCS_SIZE) 105 u64 rx_dropped_resize;
82#define EMAC_MIN_FRAME 64 106 u64 rx_dropped_mtu;
83#define EMAC_MAX_FRAME 9018 107 u64 rx_stopped;
84#define EMAC_MIN_MTU (EMAC_MIN_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) 108 /* BD reported RX errors */
85#define EMAC_MAX_MTU (EMAC_MAX_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) 109 u64 rx_bd_errors;
86 110 u64 rx_bd_overrun;
87#ifdef CONFIG_IBM_EMAC_ERRMSG 111 u64 rx_bd_bad_packet;
88void emac_serr_dump_0(struct net_device *dev); 112 u64 rx_bd_runt_packet;
89void emac_serr_dump_1(struct net_device *dev); 113 u64 rx_bd_short_event;
90void emac_err_dump(struct net_device *dev, int em0isr); 114 u64 rx_bd_alignment_error;
91void emac_phy_dump(struct net_device *); 115 u64 rx_bd_bad_fcs;
92void emac_desc_dump(struct net_device *); 116 u64 rx_bd_packet_too_long;
93void emac_mac_dump(struct net_device *); 117 u64 rx_bd_out_of_range;
94void emac_mal_dump(struct net_device *); 118 u64 rx_bd_in_range;
95#else 119 /* EMAC IRQ reported RX errors */
96#define emac_serr_dump_0(dev) do { } while (0) 120 u64 rx_parity;
97#define emac_serr_dump_1(dev) do { } while (0) 121 u64 rx_fifo_overrun;
98#define emac_err_dump(dev,x) do { } while (0) 122 u64 rx_overrun;
99#define emac_phy_dump(dev) do { } while (0) 123 u64 rx_bad_packet;
100#define emac_desc_dump(dev) do { } while (0) 124 u64 rx_runt_packet;
101#define emac_mac_dump(dev) do { } while (0) 125 u64 rx_short_event;
102#define emac_mal_dump(dev) do { } while (0) 126 u64 rx_alignment_error;
103#endif 127 u64 rx_bad_fcs;
128 u64 rx_packet_too_long;
129 u64 rx_out_of_range;
130 u64 rx_in_range;
131
132 /* Software TX Errors */
133 u64 tx_dropped;
134 /* BD reported TX errors */
135 u64 tx_bd_errors;
136 u64 tx_bd_bad_fcs;
137 u64 tx_bd_carrier_loss;
138 u64 tx_bd_excessive_deferral;
139 u64 tx_bd_excessive_collisions;
140 u64 tx_bd_late_collision;
141 u64 tx_bd_multple_collisions;
142 u64 tx_bd_single_collision;
143 u64 tx_bd_underrun;
144 u64 tx_bd_sqe;
145 /* EMAC IRQ reported TX errors */
146 u64 tx_parity;
147 u64 tx_underrun;
148 u64 tx_sqe;
149 u64 tx_errors;
150};
151
152#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct ibm_emac_stats) + \
153 sizeof(struct ibm_emac_error_stats)) \
154 / sizeof(u64))
104 155
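A minimal sketch of how the two parts reach ethtool as one flat array, normal statistics first (the helper name is hypothetical and not part of this patch; since both structs hold only u64 fields, plain copies preserve the layout):

	/* Hypothetical helper: flatten both stats blocks for ethtool,
	 * normal part first, matching EMAC_ETHTOOL_STATS_COUNT.
	 */
	static void emac_sketch_collect_stats(struct ocp_enet_private *dev, u64 *out)
	{
		memcpy(out, &dev->stats, sizeof(dev->stats));
		memcpy(out + sizeof(dev->stats) / sizeof(u64),
		       &dev->estats, sizeof(dev->estats));
	}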
105struct ocp_enet_private { 156struct ocp_enet_private {
106 struct sk_buff *tx_skb[NUM_TX_BUFF]; 157 struct net_device *ndev; /* 0 */
107 struct sk_buff *rx_skb[NUM_RX_BUFF]; 158 struct emac_regs *emacp;
108 struct mal_descriptor *tx_desc; 159
109 struct mal_descriptor *rx_desc; 160 struct mal_descriptor *tx_desc;
110 struct mal_descriptor *rx_dirty; 161 int tx_cnt;
111 struct net_device_stats stats; 162 int tx_slot;
112 int tx_cnt; 163 int ack_slot;
113 int rx_slot; 164
114 int dirty_rx; 165 struct mal_descriptor *rx_desc;
115 int tx_slot; 166 int rx_slot;
116 int ack_slot; 167 struct sk_buff *rx_sg_skb; /* 1 */
117 int rx_buffer_size; 168 int rx_skb_size;
118 169 int rx_sync_size;
119 struct mii_phy phy_mii; 170
120 int mii_phy_addr; 171 struct ibm_emac_stats stats;
121 int want_autoneg; 172 struct ocp_device *tah_dev;
122 int timer_ticks; 173
123 struct timer_list link_timer; 174 struct ibm_ocp_mal *mal;
124 struct net_device *mdio_dev; 175 struct mal_commac commac;
125 176
126 struct ocp_device *rgmii_dev; 177 struct sk_buff *tx_skb[NUM_TX_BUFF];
127 int rgmii_input; 178 struct sk_buff *rx_skb[NUM_RX_BUFF];
128 179
129 struct ocp_device *zmii_dev; 180 struct ocp_device *zmii_dev;
130 int zmii_input; 181 int zmii_input;
131 182 struct ocp_enet_private *mdio_dev;
132 struct ibm_ocp_mal *mal; 183 struct ocp_device *rgmii_dev;
133 int mal_tx_chan, mal_rx_chan; 184 int rgmii_input;
134 struct mal_commac commac; 185
135 186 struct ocp_def *def;
136 struct ocp_device *tah_dev; 187
137 188 struct mii_phy phy;
138 int opened; 189 struct timer_list link_timer;
139 int going_away; 190 int reset_failed;
140 int wol_irq; 191
141 emac_t *emacp; 192 struct ibm_emac_error_stats estats;
142 struct ocp_device *ocpdev; 193 struct net_device_stats nstats;
143 struct net_device *ndev; 194
144 spinlock_t lock; 195 struct device* ldev;
145}; 196};
146#endif /* _IBM_EMAC_CORE_H_ */ 197
198/* Ethtool get_regs complex data.
199 * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
200 * when available.
201 *
202 * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr,
203 * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers.
204 * Each register component is preceded with emac_ethtool_regs_subhdr.
205 * Order of the optional headers follows their relative bit positions
206 * in emac_ethtool_regs_hdr.components
207 */
208#define EMAC_ETHTOOL_REGS_ZMII 0x00000001
209#define EMAC_ETHTOOL_REGS_RGMII 0x00000002
210#define EMAC_ETHTOOL_REGS_TAH 0x00000004
211
212struct emac_ethtool_regs_hdr {
213 u32 components;
214};
215
216struct emac_ethtool_regs_subhdr {
217 u32 version;
218 u32 index;
219};
220
221#endif /* __IBM_EMAC_CORE_H_ */
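The BLOB layout described above can be walked as in the following sketch (the function is illustrative only; the length of each register dump is driver-internal, so just the header bookkeeping is shown):

	/* Hypothetical reader for the get_regs BLOB described above */
	static void emac_sketch_scan_regs_hdr(void *buf)
	{
		struct emac_ethtool_regs_hdr *hdr = buf;
		struct emac_ethtool_regs_subhdr *sub = (void *)(hdr + 1);

		/* the first subheader introduces the MAL register dump */
		printk("MAL%d dump, version %d\n", sub->index, sub->version);

		/* optional blocks follow the EMAC regs in bit order of 'components' */
		if (hdr->components & EMAC_ETHTOOL_REGS_ZMII)
			printk("ZMII block present\n");
		if (hdr->components & EMAC_ETHTOOL_REGS_RGMII)
			printk("RGMII block present\n");
		if (hdr->components & EMAC_ETHTOOL_REGS_TAH)
			printk("TAH block present\n");
	}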
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c
index c8512046cf84..75d3b8639041 100644
--- a/drivers/net/ibm_emac/ibm_emac_debug.c
+++ b/drivers/net/ibm_emac/ibm_emac_debug.c
@@ -1,224 +1,213 @@
1/* 1/*
2 * ibm_ocp_debug.c 2 * drivers/net/ibm_emac/ibm_emac_debug.c
3 * 3 *
4 * This has all the debug routines that were in *_enet.c	4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 * 5 *
6 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies
7 * April, 2002	7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Copyright 2002 MontaVista Softare Inc.
10 * 8 *
11 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your 11 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version. 12 * option) any later version.
13 *
15 */ 14 */
16
17#include <linux/config.h> 15#include <linux/config.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/sysrq.h>
20#include <asm/io.h> 21#include <asm/io.h>
21#include "ibm_ocp_mal.h"
22#include "ibm_ocp_zmii.h"
23#include "ibm_ocp_enet.h"
24 22
25extern int emac_phy_read(struct net_device *dev, int mii_id, int reg); 23#include "ibm_emac_core.h"
24
25static void emac_desc_dump(int idx, struct ocp_enet_private *p)
26{
27 int i;
28 printk("** EMAC%d TX BDs **\n"
29 " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
30 idx, p->tx_cnt, p->tx_slot, p->ack_slot);
31 for (i = 0; i < NUM_TX_BUFF / 2; ++i)
32 printk
33 ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
34 i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
35 p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
36 NUM_TX_BUFF / 2 + i,
37 p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
38 p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
39 p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
40 p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
41
42 printk("** EMAC%d RX BDs **\n"
43 " rx_slot = %d rx_stopped = %d rx_skb_size = %d rx_sync_size = %d\n"
44 " rx_sg_skb = 0x%p\n",
45 idx, p->rx_slot, p->commac.rx_stopped, p->rx_skb_size,
46 p->rx_sync_size, p->rx_sg_skb);
47 for (i = 0; i < NUM_RX_BUFF / 2; ++i)
48 printk
49 ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
50 i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
51 p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
52 NUM_RX_BUFF / 2 + i,
53 p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
54 p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
55 p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
56 p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
57}
58
59static void emac_mac_dump(int idx, struct ocp_enet_private *dev)
60{
61 struct emac_regs *p = dev->emacp;
62
63 printk("** EMAC%d registers **\n"
64 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
65 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
66 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n"
67 "IAHT: 0x%04x 0x%04x 0x%04x 0x%04x "
68 "GAHT: 0x%04x 0x%04x 0x%04x 0x%04x\n"
69 "LSA = %04x%08x IPGVR = 0x%04x\n"
70 "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
71 "OCTX = 0x%08x OCRX = 0x%08x IPCR = 0x%08x\n",
72 idx, in_be32(&p->mr0), in_be32(&p->mr1),
73 in_be32(&p->tmr0), in_be32(&p->tmr1),
74 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
75 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
76 in_be32(&p->vtci),
77 in_be32(&p->iaht1), in_be32(&p->iaht2), in_be32(&p->iaht3),
78 in_be32(&p->iaht4),
79 in_be32(&p->gaht1), in_be32(&p->gaht2), in_be32(&p->gaht3),
80 in_be32(&p->gaht4),
81 in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
82 in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
83 in_be32(&p->octx), in_be32(&p->ocrx), in_be32(&p->ipcr)
84 );
85
86 emac_desc_dump(idx, dev);
87}
88
89static void emac_mal_dump(struct ibm_ocp_mal *mal)
90{
91 struct ocp_func_mal_data *maldata = mal->def->additions;
92 int i;
93
94 printk("** MAL%d Registers **\n"
95 "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
96 "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
97 "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
98 mal->def->index,
99 get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
100 get_mal_dcrn(mal, MAL_IER),
101 get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
102 get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
103 get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
104 get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
105 );
106
107 printk("TX|");
108 for (i = 0; i < maldata->num_tx_chans; ++i) {
109 if (i && !(i % 4))
110 printk("\n ");
111 printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
112 }
113 printk("\nRX|");
114 for (i = 0; i < maldata->num_rx_chans; ++i) {
115 if (i && !(i % 4))
116 printk("\n ");
117 printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
118 }
119 printk("\n ");
120 for (i = 0; i < maldata->num_rx_chans; ++i) {
121 u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
122 if (i && !(i % 3))
123 printk("\n ");
124 printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
125 }
126 printk("\n");
127}
128
129static struct ocp_enet_private *__emacs[4];
130static struct ibm_ocp_mal *__mals[1];
26 131
27void emac_phy_dump(struct net_device *dev) 132void emac_dbg_register(int idx, struct ocp_enet_private *dev)
28{ 133{
29 struct ocp_enet_private *fep = dev->priv; 134 unsigned long flags;
30 unsigned long i; 135
31 uint data; 136 if (idx >= sizeof(__emacs) / sizeof(__emacs[0])) {
32 137 printk(KERN_WARNING
33 printk(KERN_DEBUG " Prepare for Phy dump....\n"); 138 "invalid index %d when registering EMAC for debugging\n",
34 for (i = 0; i < 0x1A; i++) { 139 idx);
35 data = emac_phy_read(dev, fep->mii_phy_addr, i); 140 return;
36 printk(KERN_DEBUG "Phy reg 0x%lx ==> %4x\n", i, data);
37 if (i == 0x07)
38 i = 0x0f;
39 } 141 }
142
143 local_irq_save(flags);
144 __emacs[idx] = dev;
145 local_irq_restore(flags);
40} 146}
41 147
42void emac_desc_dump(struct net_device *dev) 148void mal_dbg_register(int idx, struct ibm_ocp_mal *mal)
43{ 149{
44 struct ocp_enet_private *fep = dev->priv; 150 unsigned long flags;
45 int curr_slot; 151
46 152 if (idx >= sizeof(__mals) / sizeof(__mals[0])) {
47 printk(KERN_DEBUG 153 printk(KERN_WARNING
48 "dumping the receive descriptors: current slot is %d\n", 154 "invalid index %d when registering MAL for debugging\n",
49 fep->rx_slot); 155 idx);
50 for (curr_slot = 0; curr_slot < NUM_RX_BUFF; curr_slot++) { 156 return;
51 printk(KERN_DEBUG
52 "Desc %02d: status 0x%04x, length %3d, addr 0x%x\n",
53 curr_slot, fep->rx_desc[curr_slot].ctrl,
54 fep->rx_desc[curr_slot].data_len,
55 (unsigned int)fep->rx_desc[curr_slot].data_ptr);
56 } 157 }
158
159 local_irq_save(flags);
160 __mals[idx] = mal;
161 local_irq_restore(flags);
57} 162}
58 163
59void emac_mac_dump(struct net_device *dev) 164void emac_dbg_dump_all(void)
60{ 165{
61 struct ocp_enet_private *fep = dev->priv; 166 unsigned int i;
62 volatile emac_t *emacp = fep->emacp; 167 unsigned long flags;
63 168
64 printk(KERN_DEBUG "EMAC DEBUG ********** \n"); 169 local_irq_save(flags);
65 printk(KERN_DEBUG "EMAC_M0 ==> 0x%x\n", in_be32(&emacp->em0mr0)); 170
66 printk(KERN_DEBUG "EMAC_M1 ==> 0x%x\n", in_be32(&emacp->em0mr1)); 171 for (i = 0; i < sizeof(__mals) / sizeof(__mals[0]); ++i)
67 printk(KERN_DEBUG "EMAC_TXM0==> 0x%x\n", in_be32(&emacp->em0tmr0)); 172 if (__mals[i])
68 printk(KERN_DEBUG "EMAC_TXM1==> 0x%x\n", in_be32(&emacp->em0tmr1)); 173 emac_mal_dump(__mals[i]);
69 printk(KERN_DEBUG "EMAC_RXM ==> 0x%x\n", in_be32(&emacp->em0rmr)); 174
70 printk(KERN_DEBUG "EMAC_ISR ==> 0x%x\n", in_be32(&emacp->em0isr)); 175 for (i = 0; i < sizeof(__emacs) / sizeof(__emacs[0]); ++i)
71 printk(KERN_DEBUG "EMAC_IER ==> 0x%x\n", in_be32(&emacp->em0iser)); 176 if (__emacs[i])
72 printk(KERN_DEBUG "EMAC_IAH ==> 0x%x\n", in_be32(&emacp->em0iahr)); 177 emac_mac_dump(i, __emacs[i]);
73 printk(KERN_DEBUG "EMAC_IAL ==> 0x%x\n", in_be32(&emacp->em0ialr)); 178
74 printk(KERN_DEBUG "EMAC_VLAN_TPID_REG ==> 0x%x\n", 179 local_irq_restore(flags);
75 in_be32(&emacp->em0vtpid));
76} 180}
77 181
78void emac_mal_dump(struct net_device *dev) 182#if defined(CONFIG_MAGIC_SYSRQ)
183static void emac_sysrq_handler(int key, struct pt_regs *pt_regs,
184 struct tty_struct *tty)
79{ 185{
80 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 186 emac_dbg_dump_all();
81
82 printk(KERN_DEBUG " MAL DEBUG ********** \n");
83 printk(KERN_DEBUG " MCR ==> 0x%x\n",
84 (unsigned int)get_mal_dcrn(mal, DCRN_MALCR));
85 printk(KERN_DEBUG " ESR ==> 0x%x\n",
86 (unsigned int)get_mal_dcrn(mal, DCRN_MALESR));
87 printk(KERN_DEBUG " IER ==> 0x%x\n",
88 (unsigned int)get_mal_dcrn(mal, DCRN_MALIER));
89#ifdef CONFIG_40x
90 printk(KERN_DEBUG " DBR ==> 0x%x\n",
91 (unsigned int)get_mal_dcrn(mal, DCRN_MALDBR));
92#endif /* CONFIG_40x */
93 printk(KERN_DEBUG " TXCASR ==> 0x%x\n",
94 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCASR));
95 printk(KERN_DEBUG " TXCARR ==> 0x%x\n",
96 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCARR));
97 printk(KERN_DEBUG " TXEOBISR ==> 0x%x\n",
98 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXEOBISR));
99 printk(KERN_DEBUG " TXDEIR ==> 0x%x\n",
100 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXDEIR));
101 printk(KERN_DEBUG " RXCASR ==> 0x%x\n",
102 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCASR));
103 printk(KERN_DEBUG " RXCARR ==> 0x%x\n",
104 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCARR));
105 printk(KERN_DEBUG " RXEOBISR ==> 0x%x\n",
106 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXEOBISR));
107 printk(KERN_DEBUG " RXDEIR ==> 0x%x\n",
108 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXDEIR));
109 printk(KERN_DEBUG " TXCTP0R ==> 0x%x\n",
110 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP0R));
111 printk(KERN_DEBUG " TXCTP1R ==> 0x%x\n",
112 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP1R));
113 printk(KERN_DEBUG " TXCTP2R ==> 0x%x\n",
114 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP2R));
115 printk(KERN_DEBUG " TXCTP3R ==> 0x%x\n",
116 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP3R));
117 printk(KERN_DEBUG " RXCTP0R ==> 0x%x\n",
118 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP0R));
119 printk(KERN_DEBUG " RXCTP1R ==> 0x%x\n",
120 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP1R));
121 printk(KERN_DEBUG " RCBS0 ==> 0x%x\n",
122 (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS0));
123 printk(KERN_DEBUG " RCBS1 ==> 0x%x\n",
124 (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS1));
125} 187}
126 188
127void emac_serr_dump_0(struct net_device *dev) 189static struct sysrq_key_op emac_sysrq_op = {
190 .handler = emac_sysrq_handler,
191 .help_msg = "emaC",
192 .action_msg = "Show EMAC(s) status",
193};
194
195int __init emac_init_debug(void)
128{ 196{
129 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 197 return register_sysrq_key('c', &emac_sysrq_op);
130 unsigned long int mal_error, plb_error, plb_addr;
131
132 mal_error = get_mal_dcrn(mal, DCRN_MALESR);
133 printk(KERN_DEBUG "ppc405_eth_serr: %s channel %ld \n",
134 (mal_error & 0x40000000) ? "Receive" :
135 "Transmit", (mal_error & 0x3e000000) >> 25);
136 printk(KERN_DEBUG " ----- latched error -----\n");
137 if (mal_error & MALESR_DE)
138 printk(KERN_DEBUG " DE: descriptor error\n");
139 if (mal_error & MALESR_OEN)
140 printk(KERN_DEBUG " ONE: OPB non-fullword error\n");
141 if (mal_error & MALESR_OTE)
142 printk(KERN_DEBUG " OTE: OPB timeout error\n");
143 if (mal_error & MALESR_OSE)
144 printk(KERN_DEBUG " OSE: OPB slave error\n");
145
146 if (mal_error & MALESR_PEIN) {
147 plb_error = mfdcr(DCRN_PLB0_BESR);
148 printk(KERN_DEBUG
149 " PEIN: PLB error, PLB0_BESR is 0x%x\n",
150 (unsigned int)plb_error);
151 plb_addr = mfdcr(DCRN_PLB0_BEAR);
152 printk(KERN_DEBUG
153 " PEIN: PLB error, PLB0_BEAR is 0x%x\n",
154 (unsigned int)plb_addr);
155 }
156} 198}
157 199
158void emac_serr_dump_1(struct net_device *dev) 200void __exit emac_fini_debug(void)
159{ 201{
160 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 202 unregister_sysrq_key('c', &emac_sysrq_op);
161 int mal_error = get_mal_dcrn(mal, DCRN_MALESR);
162
163 printk(KERN_DEBUG " ----- cumulative errors -----\n");
164 if (mal_error & MALESR_DEI)
165 printk(KERN_DEBUG " DEI: descriptor error interrupt\n");
166 if (mal_error & MALESR_ONEI)
167 printk(KERN_DEBUG " OPB non-fullword error interrupt\n");
168 if (mal_error & MALESR_OTEI)
169 printk(KERN_DEBUG " OTEI: timeout error interrupt\n");
170 if (mal_error & MALESR_OSEI)
171 printk(KERN_DEBUG " OSEI: slave error interrupt\n");
172 if (mal_error & MALESR_PBEI)
173 printk(KERN_DEBUG " PBEI: PLB bus error interrupt\n");
174} 203}
175 204
176void emac_err_dump(struct net_device *dev, int em0isr) 205#else
206int __init emac_init_debug(void)
207{
208 return 0;
209}
210void __exit emac_fini_debug(void)
177{ 211{
178 printk(KERN_DEBUG "%s: on-chip ethernet error:\n", dev->name);
179
180 if (em0isr & EMAC_ISR_OVR)
181 printk(KERN_DEBUG " OVR: overrun\n");
182 if (em0isr & EMAC_ISR_PP)
183 printk(KERN_DEBUG " PP: control pause packet\n");
184 if (em0isr & EMAC_ISR_BP)
185 printk(KERN_DEBUG " BP: packet error\n");
186 if (em0isr & EMAC_ISR_RP)
187 printk(KERN_DEBUG " RP: runt packet\n");
188 if (em0isr & EMAC_ISR_SE)
189 printk(KERN_DEBUG " SE: short event\n");
190 if (em0isr & EMAC_ISR_ALE)
191 printk(KERN_DEBUG " ALE: odd number of nibbles in packet\n");
192 if (em0isr & EMAC_ISR_BFCS)
193 printk(KERN_DEBUG " BFCS: bad FCS\n");
194 if (em0isr & EMAC_ISR_PTLE)
195 printk(KERN_DEBUG " PTLE: oversized packet\n");
196 if (em0isr & EMAC_ISR_ORE)
197 printk(KERN_DEBUG
198 " ORE: packet length field > max allowed LLC\n");
199 if (em0isr & EMAC_ISR_IRE)
200 printk(KERN_DEBUG " IRE: In Range error\n");
201 if (em0isr & EMAC_ISR_DBDM)
202 printk(KERN_DEBUG " DBDM: xmit error or SQE\n");
203 if (em0isr & EMAC_ISR_DB0)
204 printk(KERN_DEBUG " DB0: xmit error or SQE on TX channel 0\n");
205 if (em0isr & EMAC_ISR_SE0)
206 printk(KERN_DEBUG
207 " SE0: Signal Quality Error test failure from TX channel 0\n");
208 if (em0isr & EMAC_ISR_TE0)
209 printk(KERN_DEBUG " TE0: xmit channel 0 aborted\n");
210 if (em0isr & EMAC_ISR_DB1)
211		printk(KERN_DEBUG " DB1: xmit error or SQE on TX channel 1\n");
212 if (em0isr & EMAC_ISR_SE1)
213 printk(KERN_DEBUG
214 " SE1: Signal Quality Error test failure from TX channel 1\n");
215 if (em0isr & EMAC_ISR_TE1)
216 printk(KERN_DEBUG " TE1: xmit channel 1 aborted\n");
217 if (em0isr & EMAC_ISR_MOS)
218 printk(KERN_DEBUG " MOS\n");
219 if (em0isr & EMAC_ISR_MOF)
220 printk(KERN_DEBUG " MOF\n");
221
222 emac_mac_dump(dev);
223 emac_mal_dump(dev);
224} 212}
213#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.h b/drivers/net/ibm_emac/ibm_emac_debug.h
new file mode 100644
index 000000000000..e85fbe0a8da9
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_debug.h
@@ -0,0 +1,63 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_debug.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15#ifndef __IBM_EMAC_DEBUG_H_
16#define __IBM_EMAC_DEBUG_H_
17
18#include <linux/config.h>
19#include <linux/init.h>
20#include "ibm_emac_core.h"
21#include "ibm_emac_mal.h"
22
23#if defined(CONFIG_IBM_EMAC_DEBUG)
24void emac_dbg_register(int idx, struct ocp_enet_private *dev);
25void mal_dbg_register(int idx, struct ibm_ocp_mal *mal);
26int emac_init_debug(void) __init;
27void emac_fini_debug(void) __exit;
28void emac_dbg_dump_all(void);
29# define DBG_LEVEL 1
30#else
31# define emac_dbg_register(x,y) ((void)0)
32# define mal_dbg_register(x,y) ((void)0)
33# define emac_init_debug() ((void)0)
34# define emac_fini_debug() ((void)0)
35# define emac_dbg_dump_all() ((void)0)
36# define DBG_LEVEL 0
37#endif
38
39#if DBG_LEVEL > 0
40# define DBG(f,x...) printk("emac" f, ##x)
41# define MAL_DBG(f,x...) printk("mal" f, ##x)
42# define ZMII_DBG(f,x...) printk("zmii" f, ##x)
43# define RGMII_DBG(f,x...) printk("rgmii" f, ##x)
44# define NL "\n"
45#else
46# define DBG(f,x...) ((void)0)
47# define MAL_DBG(f,x...) ((void)0)
48# define ZMII_DBG(f,x...) ((void)0)
49# define RGMII_DBG(f,x...) ((void)0)
50#endif
51#if DBG_LEVEL > 1
52# define DBG2(f,x...) DBG(f, ##x)
53# define MAL_DBG2(f,x...) MAL_DBG(f, ##x)
54# define ZMII_DBG2(f,x...) ZMII_DBG(f, ##x)
55# define RGMII_DBG2(f,x...) RGMII_DBG(f, ##x)
56#else
57# define DBG2(f,x...) ((void)0)
58# define MAL_DBG2(f,x...) ((void)0)
59# define ZMII_DBG2(f,x...) ((void)0)
60# define RGMII_DBG2(f,x...) ((void)0)
61#endif
62
63#endif /* __IBM_EMAC_DEBUG_H_ */
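These macros are used throughout this patch with the NL idiom: the format string carries no prefix or trailing newline of its own, since the macro prepends the block name ("emac", "mal", ...) and NL appends the newline only when debugging is compiled in. For example (both calls appear elsewhere in this patch):

	DBG("%d: probe" NL, ocpdev->def->index);   /* prints "emac0: probe\n" */
	MAL_DBG("%d: reset" NL, mal->def->index);  /* prints "mal0: reset\n"  */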
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
index e59f57f363ca..da88d43081cc 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -1,436 +1,565 @@
1/* 1/*
2 * ibm_ocp_mal.c 2 * drivers/net/ibm_emac/ibm_emac_mal.c
3 * 3 *
4 * Armin Kuster akuster@mvista.com 4 * Memory Access Layer (MAL) support
5 * Juen, 2002 5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
6 * 8 *
7 * Copyright 2002 MontaVista Software Inc.	9 * Based on original work by
10 * Benjamin Herrenschmidt <benh@kernel.crashing.org>,
11 * David Gibson <hermes@gibson.dropbear.id.au>,
12 *
13 * Armin Kuster <akuster@mvista.com>
14 * Copyright 2002 MontaVista Softare Inc.
8 * 15 *
9 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your 18 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. 19 * option) any later version.
20 *
13 */ 21 */
14
15#include <linux/config.h> 22#include <linux/config.h>
16#include <linux/module.h> 23#include <linux/module.h>
17#include <linux/kernel.h> 24#include <linux/kernel.h>
18#include <linux/errno.h> 25#include <linux/errno.h>
19#include <linux/netdevice.h> 26#include <linux/netdevice.h>
20#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/interrupt.h>
21#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
22 30
23#include <asm/io.h>
24#include <asm/irq.h>
25#include <asm/ocp.h> 31#include <asm/ocp.h>
26 32
33#include "ibm_emac_core.h"
27#include "ibm_emac_mal.h" 34#include "ibm_emac_mal.h"
35#include "ibm_emac_debug.h"
28 36
29// Locking: Should we share a lock with the client ? The client could provide 37int __init mal_register_commac(struct ibm_ocp_mal *mal,
30// a lock pointer (optionally) in the commac structure... I don't think this is 38 struct mal_commac *commac)
31// really necessary though
32
33/* This lock protects the commac list. On today's UP implementations, it's
34 * really only used as IRQ protection in mal_{register,unregister}_commac()
35 */
36static DEFINE_RWLOCK(mal_list_lock);
37
38int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
39{ 39{
40 unsigned long flags; 40 unsigned long flags;
41 local_irq_save(flags);
41 42
42 write_lock_irqsave(&mal_list_lock, flags); 43 MAL_DBG("%d: reg(%08x, %08x)" NL, mal->def->index,
44 commac->tx_chan_mask, commac->rx_chan_mask);
43 45
44 /* Don't let multiple commacs claim the same channel */ 46 /* Don't let multiple commacs claim the same channel(s) */
45 if ((mal->tx_chan_mask & commac->tx_chan_mask) || 47 if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
46 (mal->rx_chan_mask & commac->rx_chan_mask)) { 48 (mal->rx_chan_mask & commac->rx_chan_mask)) {
47 write_unlock_irqrestore(&mal_list_lock, flags); 49 local_irq_restore(flags);
50 printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
51 mal->def->index);
48 return -EBUSY; 52 return -EBUSY;
49 } 53 }
50 54
51 mal->tx_chan_mask |= commac->tx_chan_mask; 55 mal->tx_chan_mask |= commac->tx_chan_mask;
52 mal->rx_chan_mask |= commac->rx_chan_mask; 56 mal->rx_chan_mask |= commac->rx_chan_mask;
57 list_add(&commac->list, &mal->list);
53 58
54 list_add(&commac->list, &mal->commac); 59 local_irq_restore(flags);
55
56 write_unlock_irqrestore(&mal_list_lock, flags);
57
58 return 0; 60 return 0;
59} 61}
60 62
61int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac) 63void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
64 struct mal_commac *commac)
62{ 65{
63 unsigned long flags; 66 unsigned long flags;
67 local_irq_save(flags);
64 68
65 write_lock_irqsave(&mal_list_lock, flags); 69 MAL_DBG("%d: unreg(%08x, %08x)" NL, mal->def->index,
70 commac->tx_chan_mask, commac->rx_chan_mask);
66 71
67 mal->tx_chan_mask &= ~commac->tx_chan_mask; 72 mal->tx_chan_mask &= ~commac->tx_chan_mask;
68 mal->rx_chan_mask &= ~commac->rx_chan_mask; 73 mal->rx_chan_mask &= ~commac->rx_chan_mask;
69
70 list_del_init(&commac->list); 74 list_del_init(&commac->list);
71 75
72 write_unlock_irqrestore(&mal_list_lock, flags); 76 local_irq_restore(flags);
73
74 return 0;
75} 77}
76 78
77int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size) 79int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
78{ 80{
79 switch (channel) { 81 struct ocp_func_mal_data *maldata = mal->def->additions;
80 case 0: 82 BUG_ON(channel < 0 || channel >= maldata->num_rx_chans ||
81 set_mal_dcrn(mal, DCRN_MALRCBS0, size); 83 size > MAL_MAX_RX_SIZE);
82 break; 84
83#ifdef DCRN_MALRCBS1 85 MAL_DBG("%d: set_rbcs(%d, %lu)" NL, mal->def->index, channel, size);
84 case 1: 86
85 set_mal_dcrn(mal, DCRN_MALRCBS1, size); 87 if (size & 0xf) {
86 break; 88 printk(KERN_WARNING
87#endif 89 "mal%d: incorrect RX size %lu for the channel %d\n",
88#ifdef DCRN_MALRCBS2 90 mal->def->index, size, channel);
89 case 2:
90 set_mal_dcrn(mal, DCRN_MALRCBS2, size);
91 break;
92#endif
93#ifdef DCRN_MALRCBS3
94 case 3:
95 set_mal_dcrn(mal, DCRN_MALRCBS3, size);
96 break;
97#endif
98 default:
99 return -EINVAL; 91 return -EINVAL;
100 } 92 }
101 93
94 set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
102 return 0; 95 return 0;
103} 96}
104 97
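The RCBS register holds the channel buffer size in 16-byte units, hence the (size & 0xf) sanity check and the size >> 4 write; the MAL register dump earlier in this patch prints both the raw value and r * 16 accordingly. A worked call (the 1520-byte size is illustrative):

	mal_set_rcbs(mal, 0, 1520);	/* writes 1520 >> 4 == 95 into MAL_RCBS(0) */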
105static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs) 98int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel)
106{ 99{
107 struct ibm_ocp_mal *mal = dev_instance; 100 struct ocp_func_mal_data *maldata = mal->def->additions;
108 unsigned long mal_error; 101 BUG_ON(channel < 0 || channel >= maldata->num_tx_chans);
102 return channel * NUM_TX_BUFF;
103}
109 104
110 /* 105int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel)
111 * This SERR applies to one of the devices on the MAL, here we charge 106{
112 * it against the first EMAC registered for the MAL. 107 struct ocp_func_mal_data *maldata = mal->def->additions;
113 */ 108 BUG_ON(channel < 0 || channel >= maldata->num_rx_chans);
109 return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
110}
114 111
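These two helpers describe a single shared BD array: all TX rings first, then all RX rings. With illustrative values (NUM_TX_BUFF = NUM_RX_BUFF = 64 and a MAL with two TX channels; the real numbers come from Kconfig and the MAL definition):

	/* mal_tx_bd_offset(mal, 0) ==   0    TX chan 0: BDs   0..63
	 * mal_tx_bd_offset(mal, 1) ==  64    TX chan 1: BDs  64..127
	 * mal_rx_bd_offset(mal, 0) == 128    RX chan 0: BDs 128..191
	 */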
115 mal_error = get_mal_dcrn(mal, DCRN_MALESR); 112void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel)
113{
114 local_bh_disable();
115 MAL_DBG("%d: enable_tx(%d)" NL, mal->def->index, channel);
116 set_mal_dcrn(mal, MAL_TXCASR,
117 get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
118 local_bh_enable();
119}
116 120
117 printk(KERN_ERR "%s: System Error (MALESR=%lx)\n", 121void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel)
118 "MAL" /* FIXME: get the name right */ , mal_error); 122{
123 set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
124 MAL_DBG("%d: disable_tx(%d)" NL, mal->def->index, channel);
125}
119 126
120 /* FIXME: decipher error */ 127void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel)
121 /* DIXME: distribute to commacs, if possible */ 128{
129 local_bh_disable();
130 MAL_DBG("%d: enable_rx(%d)" NL, mal->def->index, channel);
131 set_mal_dcrn(mal, MAL_RXCASR,
132 get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
133 local_bh_enable();
134}
122 135
123 /* Clear the error status register */ 136void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel)
124 set_mal_dcrn(mal, DCRN_MALESR, mal_error); 137{
138 set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
139 MAL_DBG("%d: disable_rx(%d)" NL, mal->def->index, channel);
140}
125 141
126 return IRQ_HANDLED; 142void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac)
143{
144 local_bh_disable();
145 MAL_DBG("%d: poll_add(%p)" NL, mal->def->index, commac);
146 list_add_tail(&commac->poll_list, &mal->poll_list);
147 local_bh_enable();
127} 148}
128 149
129static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs) 150void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac)
151{
152 local_bh_disable();
153 MAL_DBG("%d: poll_del(%p)" NL, mal->def->index, commac);
154 list_del(&commac->poll_list);
155 local_bh_enable();
156}
157
158/* synchronized by mal_poll() */
159static inline void mal_enable_eob_irq(struct ibm_ocp_mal *mal)
160{
161 MAL_DBG2("%d: enable_irq" NL, mal->def->index);
162 set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
163}
164
165/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
166static inline void mal_disable_eob_irq(struct ibm_ocp_mal *mal)
167{
168 set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
169 MAL_DBG2("%d: disable_irq" NL, mal->def->index);
170}
171
172static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
130{ 173{
131 struct ibm_ocp_mal *mal = dev_instance; 174 struct ibm_ocp_mal *mal = dev_instance;
132 struct list_head *l; 175 u32 esr = get_mal_dcrn(mal, MAL_ESR);
133 unsigned long isr;
134 176
135 isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR); 177 /* Clear the error status register */
136 set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr); 178 set_mal_dcrn(mal, MAL_ESR, esr);
137 179
138 read_lock(&mal_list_lock); 180 MAL_DBG("%d: SERR %08x" NL, mal->def->index, esr);
139 list_for_each(l, &mal->commac) {
140 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
141 181
142 if (isr & mc->tx_chan_mask) { 182 if (esr & MAL_ESR_EVB) {
143 mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask); 183 if (esr & MAL_ESR_DE) {
184 /* We ignore Descriptor error,
185 * TXDE or RXDE interrupt will be generated anyway.
186 */
187 return IRQ_HANDLED;
144 } 188 }
189
190 if (esr & MAL_ESR_PEIN) {
191 /* PLB error, it's probably buggy hardware or
192 * incorrect physical address in BD (i.e. bug)
193 */
194 if (net_ratelimit())
195 printk(KERN_ERR
196 "mal%d: system error, PLB (ESR = 0x%08x)\n",
197 mal->def->index, esr);
198 return IRQ_HANDLED;
199 }
200
201 /* OPB error, it's probably buggy hardware or incorrect EBC setup */
202 if (net_ratelimit())
203 printk(KERN_ERR
204 "mal%d: system error, OPB (ESR = 0x%08x)\n",
205 mal->def->index, esr);
145 } 206 }
146 read_unlock(&mal_list_lock); 207 return IRQ_HANDLED;
208}
209
210static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)
211{
212 if (likely(netif_rx_schedule_prep(&mal->poll_dev))) {
213 MAL_DBG2("%d: schedule_poll" NL, mal->def->index);
214 mal_disable_eob_irq(mal);
215 __netif_rx_schedule(&mal->poll_dev);
216 } else
217 MAL_DBG2("%d: already in poll" NL, mal->def->index);
218}
147 219
220static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
221{
222 struct ibm_ocp_mal *mal = dev_instance;
223 u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
224 MAL_DBG2("%d: txeob %08x" NL, mal->def->index, r);
225 mal_schedule_poll(mal);
226 set_mal_dcrn(mal, MAL_TXEOBISR, r);
148 return IRQ_HANDLED; 227 return IRQ_HANDLED;
149} 228}
150 229
151static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs) 230static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
152{ 231{
153 struct ibm_ocp_mal *mal = dev_instance; 232 struct ibm_ocp_mal *mal = dev_instance;
154 struct list_head *l; 233 u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
155 unsigned long isr; 234 MAL_DBG2("%d: rxeob %08x" NL, mal->def->index, r);
235 mal_schedule_poll(mal);
236 set_mal_dcrn(mal, MAL_RXEOBISR, r);
237 return IRQ_HANDLED;
238}
156 239
157 isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR); 240static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
158 set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr); 241{
242 struct ibm_ocp_mal *mal = dev_instance;
243 u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
244 set_mal_dcrn(mal, MAL_TXDEIR, deir);
159 245
160 read_lock(&mal_list_lock); 246 MAL_DBG("%d: txde %08x" NL, mal->def->index, deir);
161 list_for_each(l, &mal->commac) {
162 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
163 247
164 if (isr & mc->rx_chan_mask) { 248 if (net_ratelimit())
165 mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask); 249 printk(KERN_ERR
166 } 250 "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
167 } 251 mal->def->index, deir);
168 read_unlock(&mal_list_lock);
169 252
170 return IRQ_HANDLED; 253 return IRQ_HANDLED;
171} 254}
172 255
173static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs) 256static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
174{ 257{
175 struct ibm_ocp_mal *mal = dev_instance; 258 struct ibm_ocp_mal *mal = dev_instance;
176 struct list_head *l; 259 struct list_head *l;
177 unsigned long deir; 260 u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);
178 261
179 deir = get_mal_dcrn(mal, DCRN_MALTXDEIR); 262 MAL_DBG("%d: rxde %08x" NL, mal->def->index, deir);
180 263
181 /* FIXME: print which MAL correctly */ 264 list_for_each(l, &mal->list) {
182 printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
183 "MAL", deir);
184
185 read_lock(&mal_list_lock);
186 list_for_each(l, &mal->commac) {
187 struct mal_commac *mc = list_entry(l, struct mal_commac, list); 265 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
188 266 if (deir & mc->rx_chan_mask) {
189 if (deir & mc->tx_chan_mask) { 267 mc->rx_stopped = 1;
190 mc->ops->txde(mc->dev, deir & mc->tx_chan_mask); 268 mc->ops->rxde(mc->dev);
191 } 269 }
192 } 270 }
193 read_unlock(&mal_list_lock); 271
272 mal_schedule_poll(mal);
273 set_mal_dcrn(mal, MAL_RXDEIR, deir);
194 274
195 return IRQ_HANDLED; 275 return IRQ_HANDLED;
196} 276}
197 277
198/* 278static int mal_poll(struct net_device *ndev, int *budget)
199 * This interrupt should be very rare at best. This occurs when
200 * the hardware has a problem with the receive descriptors. The manual
201 * states that it occurs when the hardware finds a receive descriptor	282	int rx_work_limit = min(ndev->quota, *budget), received = 0, done;
202 * whose empty bit is not set. The recovery mechanism will be to	283
203 * traverse through the descriptors, handle any that are marked to be
204 * handled and reinitialize each along the way. At that point the driver
205 * will be restarted.
206 */
207static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
208{ 279{
209 struct ibm_ocp_mal *mal = dev_instance; 280 struct ibm_ocp_mal *mal = ndev->priv;
210 struct list_head *l; 281 struct list_head *l;
211 unsigned long deir; 282 int rx_work_limit = min(ndev->quota, *budget), received = 0, done;
212 283
213 deir = get_mal_dcrn(mal, DCRN_MALRXDEIR); 284 MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget,
285 rx_work_limit);
286 again:
287 /* Process TX skbs */
288 list_for_each(l, &mal->poll_list) {
289 struct mal_commac *mc =
290 list_entry(l, struct mal_commac, poll_list);
291 mc->ops->poll_tx(mc->dev);
292 }
214 293
215 /* 294 /* Process RX skbs.
216 * This really is needed. This case was encountered in stress testing. 295 * We _might_ need something smarter here to enforce polling fairness.
217 */ 296 */
218 if (deir == 0) 297 list_for_each(l, &mal->poll_list) {
219 return IRQ_HANDLED; 298 struct mal_commac *mc =
220 299 list_entry(l, struct mal_commac, poll_list);
221 /* FIXME: print which MAL correctly */ 300 int n = mc->ops->poll_rx(mc->dev, rx_work_limit);
222 printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n", 301 if (n) {
223 "MAL", deir); 302 received += n;
224 303 rx_work_limit -= n;
225 read_lock(&mal_list_lock); 304 if (rx_work_limit <= 0) {
226 list_for_each(l, &mal->commac) { 305 done = 0;
227 struct mal_commac *mc = list_entry(l, struct mal_commac, list); 306 goto more_work; // XXX What if this is the last one ?
307 }
308 }
309 }
228 310
229 if (deir & mc->rx_chan_mask) { 311 /* We need to disable IRQs to protect from RXDE IRQ here */
230 mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask); 312 local_irq_disable();
313 __netif_rx_complete(ndev);
314 mal_enable_eob_irq(mal);
315 local_irq_enable();
316
317 done = 1;
318
319 /* Check for "rotting" packet(s) */
320 list_for_each(l, &mal->poll_list) {
321 struct mal_commac *mc =
322 list_entry(l, struct mal_commac, poll_list);
323 if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) {
324 MAL_DBG2("%d: rotting packet" NL, mal->def->index);
325 if (netif_rx_reschedule(ndev, received))
326 mal_disable_eob_irq(mal);
327 else
328 MAL_DBG2("%d: already in poll list" NL,
329 mal->def->index);
330
331 if (rx_work_limit > 0)
332 goto again;
333 else
334 goto more_work;
231 } 335 }
336 mc->ops->poll_tx(mc->dev);
232 } 337 }
233 read_unlock(&mal_list_lock);
234 338
235 return IRQ_HANDLED; 339 more_work:
340 ndev->quota -= received;
341 *budget -= received;
342
343 MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget,
344 done ? 0 : 1);
345 return done ? 0 : 1;
346}
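For readers unfamiliar with the 2.6-era polling API that mal_poll() implements, here is a minimal sketch of the dev->poll contract (process_rx() and enable_rx_irq() are illustrative placeholders, not functions from this driver):

static int example_poll(struct net_device *dev, int *budget)
{
	/* Never process more packets than the smaller of the device
	 * quota and the global budget for this softirq run. */
	int limit = min(dev->quota, *budget);
	int received = process_rx(dev, limit);	/* illustrative helper */

	dev->quota -= received;
	*budget -= received;

	if (received < limit) {
		/* All work done: leave the poll list and re-enable the
		 * receive interrupt; IRQs are masked to avoid racing
		 * with a fresh RX interrupt during the transition,
		 * just like the local_irq_disable() section above. */
		local_irq_disable();
		__netif_rx_complete(dev);
		enable_rx_irq(dev);		/* illustrative helper */
		local_irq_enable();
		return 0;
	}
	return 1;	/* more work pending, keep polling */
}

mal_poll() adds the "rotting packet" re-check on top of this basic shape because several commacs share a single poll_dev.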
347
348static void mal_reset(struct ibm_ocp_mal *mal)
349{
350 int n = 10;
351 MAL_DBG("%d: reset" NL, mal->def->index);
352
353 set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);
354
355 /* Wait for reset to complete (1 system clock) */
356 while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
357 --n;
358
359 if (unlikely(!n))
360 printk(KERN_ERR "mal%d: reset timeout\n", mal->def->index);
361}
362
363int mal_get_regs_len(struct ibm_ocp_mal *mal)
364{
365 return sizeof(struct emac_ethtool_regs_subhdr) +
366 sizeof(struct ibm_mal_regs);
367}
368
369void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf)
370{
371 struct emac_ethtool_regs_subhdr *hdr = buf;
372 struct ibm_mal_regs *regs = (struct ibm_mal_regs *)(hdr + 1);
373 struct ocp_func_mal_data *maldata = mal->def->additions;
374 int i;
375
376 hdr->version = MAL_VERSION;
377 hdr->index = mal->def->index;
378
379 regs->tx_count = maldata->num_tx_chans;
380 regs->rx_count = maldata->num_rx_chans;
381
382 regs->cfg = get_mal_dcrn(mal, MAL_CFG);
383 regs->esr = get_mal_dcrn(mal, MAL_ESR);
384 regs->ier = get_mal_dcrn(mal, MAL_IER);
385 regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
386 regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
387 regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
388 regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
389 regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
390 regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
391 regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
392 regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);
393
394 for (i = 0; i < regs->tx_count; ++i)
395 regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));
396
397 for (i = 0; i < regs->rx_count; ++i) {
398 regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
399 regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
400 }
401 return regs + 1;
236} 402}
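Note that mal_dump_regs() returns a pointer just past the bytes it wrote, so the EMAC ethtool code can chain several sub-dumps into one buffer sized via the matching *_get_regs_len() helpers. A hypothetical calling pattern (buffer management elided):

	void *p = buf;		/* >= sum of the *_get_regs_len() values */
	p = mal_dump_regs(dev->mal, p);
	p = rgmii_dump_regs(dev->rgmii_dev, p);	/* see ibm_emac_rgmii.c below */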
237 403
238static int __init mal_probe(struct ocp_device *ocpdev) 404static int __init mal_probe(struct ocp_device *ocpdev)
239{ 405{
240 struct ibm_ocp_mal *mal = NULL; 406 struct ibm_ocp_mal *mal;
241 struct ocp_func_mal_data *maldata; 407 struct ocp_func_mal_data *maldata;
242 int err = 0; 408 int err = 0, i, bd_size;
409
410 MAL_DBG("%d: probe" NL, ocpdev->def->index);
243 411
244 maldata = (struct ocp_func_mal_data *)ocpdev->def->additions; 412 maldata = ocpdev->def->additions;
245 if (maldata == NULL) { 413 if (maldata == NULL) {
246 printk(KERN_ERR "mal%d: Missing additional datas !\n", 414 printk(KERN_ERR "mal%d: missing additional data!\n",
247 ocpdev->def->index); 415 ocpdev->def->index);
248 return -ENODEV; 416 return -ENODEV;
249 } 417 }
250 418
251 mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL); 419 mal = kzalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
252 if (mal == NULL) { 420 if (!mal) {
253 printk(KERN_ERR 421 printk(KERN_ERR
254 "mal%d: Out of memory allocating MAL structure !\n", 422 "mal%d: out of memory allocating MAL structure!\n",
255 ocpdev->def->index); 423 ocpdev->def->index);
256 return -ENOMEM; 424 return -ENOMEM;
257 } 425 }
258 memset(mal, 0, sizeof(*mal)); 426 mal->dcrbase = maldata->dcr_base;
259 427 mal->def = ocpdev->def;
260 switch (ocpdev->def->index) {
261 case 0:
262 mal->dcrbase = DCRN_MAL_BASE;
263 break;
264#ifdef DCRN_MAL1_BASE
265 case 1:
266 mal->dcrbase = DCRN_MAL1_BASE;
267 break;
268#endif
269 default:
270 BUG();
271 }
272
273 /**************************/
274 428
275 INIT_LIST_HEAD(&mal->commac); 429 INIT_LIST_HEAD(&mal->poll_list);
430 set_bit(__LINK_STATE_START, &mal->poll_dev.state);
431 mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT;
432 mal->poll_dev.poll = mal_poll;
433 mal->poll_dev.priv = mal;
434 atomic_set(&mal->poll_dev.refcnt, 1);
276 435
277 set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF); 436 INIT_LIST_HEAD(&mal->list);
278 set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);
279 437
280 set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR); /* 384 */ 438 /* Load power-on reset defaults */
281 /* FIXME: Add delay */ 439 mal_reset(mal);
282 440
283 /* Set the MAL configuration register */ 441 /* Set the MAL configuration register */
284 set_mal_dcrn(mal, DCRN_MALCR, 442 set_mal_dcrn(mal, MAL_CFG, MAL_CFG_DEFAULT | MAL_CFG_PLBB |
285 MALCR_PLBB | MALCR_OPBBL | MALCR_LEA | 443 MAL_CFG_OPBBL | MAL_CFG_LEA);
286 MALCR_PLBLT_DEFAULT); 444
287 445 mal_enable_eob_irq(mal);
288 /* It would be nice to allocate buffers separately for each 446
289 * channel, but we can't because the channels share the upper 447 /* Allocate space for BD rings */
290 * 13 bits of address lines. Each channel's buffer must also 448 BUG_ON(maldata->num_tx_chans <= 0 || maldata->num_tx_chans > 32);
291 * be 4k aligned, so we allocate 4k for each channel. This is 449 BUG_ON(maldata->num_rx_chans <= 0 || maldata->num_rx_chans > 32);
292 * inefficient. FIXME: do better, if possible */ 450 bd_size = sizeof(struct mal_descriptor) *
293 mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev, 451 (NUM_TX_BUFF * maldata->num_tx_chans +
294 MAL_DT_ALIGN * 452 NUM_RX_BUFF * maldata->num_rx_chans);
295 maldata->num_tx_chans, 453 mal->bd_virt =
296 &mal->tx_phys_addr, GFP_KERNEL); 454 dma_alloc_coherent(&ocpdev->dev, bd_size, &mal->bd_dma, GFP_KERNEL);
297 if (mal->tx_virt_addr == NULL) { 455
456 if (!mal->bd_virt) {
298 printk(KERN_ERR 457 printk(KERN_ERR
299 "mal%d: Out of memory allocating MAL descriptors !\n", 458 "mal%d: out of memory allocating RX/TX descriptors!\n",
300 ocpdev->def->index); 459 mal->def->index);
301 err = -ENOMEM; 460 err = -ENOMEM;
302 goto fail; 461 goto fail;
303 } 462 }
463 memset(mal->bd_virt, 0, bd_size);
304 464
305 /* God, oh, god, I hate DCRs */ 465 for (i = 0; i < maldata->num_tx_chans; ++i)
306 set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr); 466 set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
307#ifdef DCRN_MALTXCTP1R 467 sizeof(struct mal_descriptor) *
308 if (maldata->num_tx_chans > 1) 468 mal_tx_bd_offset(mal, i));
309 set_mal_dcrn(mal, DCRN_MALTXCTP1R, 469
310 mal->tx_phys_addr + MAL_DT_ALIGN); 470 for (i = 0; i < maldata->num_rx_chans; ++i)
311#endif /* DCRN_MALTXCTP1R */ 471 set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
312#ifdef DCRN_MALTXCTP2R 472 sizeof(struct mal_descriptor) *
313 if (maldata->num_tx_chans > 2) 473 mal_rx_bd_offset(mal, i));
314 set_mal_dcrn(mal, DCRN_MALTXCTP2R,
315 mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
316#endif /* DCRN_MALTXCTP2R */
317#ifdef DCRN_MALTXCTP3R
318 if (maldata->num_tx_chans > 3)
319 set_mal_dcrn(mal, DCRN_MALTXCTP3R,
320 mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
321#endif /* DCRN_MALTXCTP3R */
322#ifdef DCRN_MALTXCTP4R
323 if (maldata->num_tx_chans > 4)
324 set_mal_dcrn(mal, DCRN_MALTXCTP4R,
325 mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
326#endif /* DCRN_MALTXCTP4R */
327#ifdef DCRN_MALTXCTP5R
328 if (maldata->num_tx_chans > 5)
329 set_mal_dcrn(mal, DCRN_MALTXCTP5R,
330 mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
331#endif /* DCRN_MALTXCTP5R */
332#ifdef DCRN_MALTXCTP6R
333 if (maldata->num_tx_chans > 6)
334 set_mal_dcrn(mal, DCRN_MALTXCTP6R,
335 mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
336#endif /* DCRN_MALTXCTP6R */
337#ifdef DCRN_MALTXCTP7R
338 if (maldata->num_tx_chans > 7)
339 set_mal_dcrn(mal, DCRN_MALTXCTP7R,
340 mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
341#endif /* DCRN_MALTXCTP7R */
342
343 mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
344 MAL_DT_ALIGN *
345 maldata->num_rx_chans,
346 &mal->rx_phys_addr, GFP_KERNEL);
347
348 set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
349#ifdef DCRN_MALRXCTP1R
350 if (maldata->num_rx_chans > 1)
351 set_mal_dcrn(mal, DCRN_MALRXCTP1R,
352 mal->rx_phys_addr + MAL_DT_ALIGN);
353#endif /* DCRN_MALRXCTP1R */
354#ifdef DCRN_MALRXCTP2R
355 if (maldata->num_rx_chans > 2)
356 set_mal_dcrn(mal, DCRN_MALRXCTP2R,
357 mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
358#endif /* DCRN_MALRXCTP2R */
359#ifdef DCRN_MALRXCTP3R
360 if (maldata->num_rx_chans > 3)
361 set_mal_dcrn(mal, DCRN_MALRXCTP3R,
362 mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
363#endif /* DCRN_MALRXCTP3R */
364 474
365 err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal); 475 err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
366 if (err) 476 if (err)
367 goto fail; 477 goto fail2;
368 err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal); 478 err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal);
369 if (err) 479 if (err)
370 goto fail; 480 goto fail3;
371 err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); 481 err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
372 if (err) 482 if (err)
373 goto fail; 483 goto fail4;
374 err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal); 484 err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
375 if (err) 485 if (err)
376 goto fail; 486 goto fail5;
377 err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); 487 err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
378 if (err) 488 if (err)
379 goto fail; 489 goto fail6;
380 490
381 set_mal_dcrn(mal, DCRN_MALIER, 491 /* Enable all MAL SERR interrupt sources */
382 MALIER_DE | MALIER_NE | MALIER_TE | 492 set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
383 MALIER_OPBE | MALIER_PLBE);
384 493
385 /* Advertise me to the rest of the world */ 494 /* Advertise this instance to the rest of the world */
386 ocp_set_drvdata(ocpdev, mal); 495 ocp_set_drvdata(ocpdev, mal);
387 496
388 printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n", 497 mal_dbg_register(mal->def->index, mal);
389 ocpdev->def->index, maldata->num_tx_chans,
390 maldata->num_rx_chans);
391 498
499 printk(KERN_INFO "mal%d: initialized, %d TX channels, %d RX channels\n",
500 mal->def->index, maldata->num_tx_chans, maldata->num_rx_chans);
392 return 0; 501 return 0;
393 502
503 fail6:
504 free_irq(maldata->rxde_irq, mal);
505 fail5:
506 free_irq(maldata->txeob_irq, mal);
507 fail4:
508 free_irq(maldata->txde_irq, mal);
509 fail3:
510 free_irq(maldata->serr_irq, mal);
511 fail2:
512 dma_free_coherent(&ocpdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
394 fail: 513 fail:
395 /* FIXME: dispose requested IRQs ! */ 514 kfree(mal);
396 if (err && mal)
397 kfree(mal);
398 return err; 515 return err;
399} 516}
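The probe above lays out all TX rings first in the coherent block, followed by all RX rings, and programs each channel's MAL_TXCTPR/MAL_RXCTPR from the corresponding element offset. The offset helpers themselves are not part of this hunk; plausible implementations, stated as an assumption derived from that layout:

/* Assumed layout: [TX ring 0 | ... | TX ring N-1 | RX ring 0 | ...],
 * offsets counted in 'struct mal_descriptor' elements. */
int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel)
{
	return channel * NUM_TX_BUFF;
}

int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel)
{
	struct ocp_func_mal_data *maldata = mal->def->additions;

	return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
}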
400 517
401static void __exit mal_remove(struct ocp_device *ocpdev) 518static void __exit mal_remove(struct ocp_device *ocpdev)
402{ 519{
403 struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev); 520 struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
404 struct ocp_func_mal_data *maldata = ocpdev->def->additions; 521 struct ocp_func_mal_data *maldata = mal->def->additions;
522
523 MAL_DBG("%d: remove" NL, mal->def->index);
405 524
406 BUG_ON(!maldata); 525 /* Synchronize with scheduled polling,
526 stolen from net/core/dev.c:dev_close()
527 */
528 clear_bit(__LINK_STATE_START, &mal->poll_dev.state);
529 netif_poll_disable(&mal->poll_dev);
530
531 if (!list_empty(&mal->list)) {
532 /* This is *very* bad */
533 printk(KERN_EMERG
534 "mal%d: commac list is not empty on remove!\n",
535 mal->def->index);
536 }
407 537
408 ocp_set_drvdata(ocpdev, NULL); 538 ocp_set_drvdata(ocpdev, NULL);
409 539
410 /* FIXME: shut down the MAL, deal with dependency with emac */
411 free_irq(maldata->serr_irq, mal); 540 free_irq(maldata->serr_irq, mal);
412 free_irq(maldata->txde_irq, mal); 541 free_irq(maldata->txde_irq, mal);
413 free_irq(maldata->txeob_irq, mal); 542 free_irq(maldata->txeob_irq, mal);
414 free_irq(maldata->rxde_irq, mal); 543 free_irq(maldata->rxde_irq, mal);
415 free_irq(maldata->rxeob_irq, mal); 544 free_irq(maldata->rxeob_irq, mal);
416 545
417 if (mal->tx_virt_addr) 546 mal_reset(mal);
418 dma_free_coherent(&ocpdev->dev,
419 MAL_DT_ALIGN * maldata->num_tx_chans,
420 mal->tx_virt_addr, mal->tx_phys_addr);
421 547
422 if (mal->rx_virt_addr) 548 mal_dbg_register(mal->def->index, NULL);
423 dma_free_coherent(&ocpdev->dev, 549
424 MAL_DT_ALIGN * maldata->num_rx_chans, 550 dma_free_coherent(&ocpdev->dev,
425 mal->rx_virt_addr, mal->rx_phys_addr); 551 sizeof(struct mal_descriptor) *
552 (NUM_TX_BUFF * maldata->num_tx_chans +
553 NUM_RX_BUFF * maldata->num_rx_chans), mal->bd_virt,
554 mal->bd_dma);
426 555
427 kfree(mal); 556 kfree(mal);
428} 557}
429 558
430/* Structure for a device driver */ 559/* Structure for a device driver */
431static struct ocp_device_id mal_ids[] = { 560static struct ocp_device_id mal_ids[] = {
432 {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL}, 561 { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_MAL },
433 {.vendor = OCP_VENDOR_INVALID} 562 { .vendor = OCP_VENDOR_INVALID}
434}; 563};
435 564
436static struct ocp_driver mal_driver = { 565static struct ocp_driver mal_driver = {
@@ -441,23 +570,14 @@ static struct ocp_driver mal_driver = {
441 .remove = mal_remove, 570 .remove = mal_remove,
442}; 571};
443 572
444static int __init init_mals(void) 573int __init mal_init(void)
445{ 574{
446 int rc; 575 MAL_DBG(": init" NL);
447 576 return ocp_register_driver(&mal_driver);
448 rc = ocp_register_driver(&mal_driver);
449 if (rc < 0) {
450 ocp_unregister_driver(&mal_driver);
451 return -ENODEV;
452 }
453
454 return 0;
455} 577}
456 578
457static void __exit exit_mals(void) 579void __exit mal_exit(void)
458{ 580{
581 MAL_DBG(": exit" NL);
459 ocp_unregister_driver(&mal_driver); 582 ocp_unregister_driver(&mal_driver);
460} 583}
461
462module_init(init_mals);
463module_exit(exit_mals);
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index dd9f0dabc6e0..2a2d3b24b037 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -1,131 +1,268 @@
1#ifndef _IBM_EMAC_MAL_H 1/*
2#define _IBM_EMAC_MAL_H 2 * drivers/net/ibm_emac/ibm_emac_mal.h
3 *
4 * Memory Access Layer (MAL) support
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
11 * Copyright 2002 MontaVista Software Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#ifndef __IBM_EMAC_MAL_H_
20#define __IBM_EMAC_MAL_H_
3 21
22#include <linux/config.h>
23#include <linux/init.h>
4#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/netdevice.h>
5 26
6#define MAL_DT_ALIGN (4096) /* Alignment for each channel's descriptor table */ 27#include <asm/io.h>
7 28
8#define MAL_CHAN_MASK(chan) (0x80000000 >> (chan)) 29/*
30 * These MAL "versions" probably aren't the real versions IBM uses for these
31 * MAL cores, I assigned them just to make #ifdefs in this file nicer and
32 * reflect the fact that 40x and 44x have slightly different MALs. --ebs
33 */
34#if defined(CONFIG_405GP) || defined(CONFIG_405GPR) || defined(CONFIG_405EP) || \
35 defined(CONFIG_440EP) || defined(CONFIG_440GR) || defined(CONFIG_NP405H)
36#define MAL_VERSION 1
37#elif defined(CONFIG_440GP) || defined(CONFIG_440GX) || defined(CONFIG_440SP) || \
38 defined(CONFIG_440SPE)
39#define MAL_VERSION 2
40#else
41#error "Unknown SoC, please check chip manual and choose MAL 'version'"
42#endif
43
44/* MALx DCR registers */
45#define MAL_CFG 0x00
46#define MAL_CFG_SR 0x80000000
47#define MAL_CFG_PLBB 0x00004000
48#define MAL_CFG_OPBBL 0x00000080
49#define MAL_CFG_EOPIE 0x00000004
50#define MAL_CFG_LEA 0x00000002
51#define MAL_CFG_SD 0x00000001
52#if MAL_VERSION == 1
53#define MAL_CFG_PLBP_MASK 0x00c00000
54#define MAL_CFG_PLBP_10 0x00800000
55#define MAL_CFG_GA 0x00200000
56#define MAL_CFG_OA 0x00100000
57#define MAL_CFG_PLBLE 0x00080000
58#define MAL_CFG_PLBT_MASK 0x00078000
59#define MAL_CFG_DEFAULT (MAL_CFG_PLBP_10 | MAL_CFG_PLBT_MASK)
60#elif MAL_VERSION == 2
61#define MAL_CFG_RPP_MASK 0x00c00000
62#define MAL_CFG_RPP_10 0x00800000
63#define MAL_CFG_RMBS_MASK 0x00300000
64#define MAL_CFG_WPP_MASK 0x000c0000
65#define MAL_CFG_WPP_10 0x00080000
66#define MAL_CFG_WMBS_MASK 0x00030000
67#define MAL_CFG_PLBLE 0x00008000
68#define MAL_CFG_DEFAULT (MAL_CFG_RMBS_MASK | MAL_CFG_WMBS_MASK | \
69 MAL_CFG_RPP_10 | MAL_CFG_WPP_10)
70#else
71#error "Unknown MAL version"
72#endif
73
74#define MAL_ESR 0x01
75#define MAL_ESR_EVB 0x80000000
76#define MAL_ESR_CIDT 0x40000000
77#define MAL_ESR_CID_MASK 0x3e000000
78#define MAL_ESR_CID_SHIFT 25
79#define MAL_ESR_DE 0x00100000
80#define MAL_ESR_OTE 0x00040000
81#define MAL_ESR_OSE 0x00020000
82#define MAL_ESR_PEIN 0x00010000
83#define MAL_ESR_DEI 0x00000010
84#define MAL_ESR_OTEI 0x00000004
85#define MAL_ESR_OSEI 0x00000002
86#define MAL_ESR_PBEI 0x00000001
87#if MAL_VERSION == 1
88#define MAL_ESR_ONE 0x00080000
89#define MAL_ESR_ONEI 0x00000008
90#elif MAL_VERSION == 2
91#define MAL_ESR_PTE 0x00800000
92#define MAL_ESR_PRE 0x00400000
93#define MAL_ESR_PWE 0x00200000
94#define MAL_ESR_PTEI 0x00000080
95#define MAL_ESR_PREI 0x00000040
96#define MAL_ESR_PWEI 0x00000020
97#else
98#error "Unknown MAL version"
99#endif
100
101#define MAL_IER 0x02
102#define MAL_IER_DE 0x00000010
103#define MAL_IER_OTE 0x00000004
104#define MAL_IER_OE 0x00000002
105#define MAL_IER_PE 0x00000001
106#if MAL_VERSION == 1
107#define MAL_IER_NWE 0x00000008
108#define MAL_IER_SOC_EVENTS MAL_IER_NWE
109#elif MAL_VERSION == 2
110#define MAL_IER_PT 0x00000080
111#define MAL_IER_PRE 0x00000040
112#define MAL_IER_PWE 0x00000020
113#define MAL_IER_SOC_EVENTS (MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE)
114#else
115#error "Unknown MAL version"
116#endif
117#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_DE | \
118 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
119
120#define MAL_TXCASR 0x04
121#define MAL_TXCARR 0x05
122#define MAL_TXEOBISR 0x06
123#define MAL_TXDEIR 0x07
124#define MAL_RXCASR 0x10
125#define MAL_RXCARR 0x11
126#define MAL_RXEOBISR 0x12
127#define MAL_RXDEIR 0x13
128#define MAL_TXCTPR(n) ((n) + 0x20)
129#define MAL_RXCTPR(n) ((n) + 0x40)
130#define MAL_RCBS(n) ((n) + 0x60)
131
132/* In reality MAL can handle TX buffers up to 4095 bytes long,
133 * but this isn't a good round number :) --ebs
134 */
135#define MAL_MAX_TX_SIZE 4080
136#define MAL_MAX_RX_SIZE 4080
137
138static inline int mal_rx_size(int len)
139{
140 len = (len + 0xf) & ~0xf;
141 return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len;
142}
143
144static inline int mal_tx_chunks(int len)
145{
146 return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE;
147}
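The arithmetic in these two helpers is easy to check by hand:

/* mal_rx_size(1500)  -> (1500 + 0xf) & ~0xf = 1504 (16-byte granularity)
 * mal_rx_size(9000)  -> 9008, then capped to MAL_MAX_RX_SIZE = 4080
 * mal_tx_chunks(1514) -> 1 (a standard frame fits one 4080-byte buffer)
 * mal_tx_chunks(9000) -> 3 (a jumbo frame spans three TX descriptors)
 */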
148
149#define MAL_CHAN_MASK(n) (0x80000000 >> (n))
9 150
10/* MAL Buffer Descriptor structure */ 151/* MAL Buffer Descriptor structure */
11struct mal_descriptor { 152struct mal_descriptor {
12 unsigned short ctrl; /* MAL / Commac status control bits */ 153 u16 ctrl; /* MAL / Commac status control bits */
13 short data_len; /* Max length is 4K-1 (12 bits) */ 154 u16 data_len; /* Max length is 4K-1 (12 bits) */
14 unsigned char *data_ptr; /* pointer to actual data buffer */ 155 u32 data_ptr; /* pointer to actual data buffer */
15} __attribute__ ((packed)); 156};
16 157
17/* the following defines are for the MadMAL status and control registers. */ 158/* the following defines are for the MadMAL status and control registers. */
18/* MADMAL transmit and receive status/control bits */ 159/* MADMAL transmit and receive status/control bits */
19#define MAL_RX_CTRL_EMPTY 0x8000 160#define MAL_RX_CTRL_EMPTY 0x8000
20#define MAL_RX_CTRL_WRAP 0x4000 161#define MAL_RX_CTRL_WRAP 0x4000
21#define MAL_RX_CTRL_CM 0x2000 162#define MAL_RX_CTRL_CM 0x2000
22#define MAL_RX_CTRL_LAST 0x1000 163#define MAL_RX_CTRL_LAST 0x1000
23#define MAL_RX_CTRL_FIRST 0x0800 164#define MAL_RX_CTRL_FIRST 0x0800
24#define MAL_RX_CTRL_INTR 0x0400 165#define MAL_RX_CTRL_INTR 0x0400
25 166#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST)
26#define MAL_TX_CTRL_READY 0x8000 167#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE)
27#define MAL_TX_CTRL_WRAP 0x4000 168
28#define MAL_TX_CTRL_CM 0x2000 169#define MAL_TX_CTRL_READY 0x8000
29#define MAL_TX_CTRL_LAST 0x1000 170#define MAL_TX_CTRL_WRAP 0x4000
30#define MAL_TX_CTRL_INTR 0x0400 171#define MAL_TX_CTRL_CM 0x2000
172#define MAL_TX_CTRL_LAST 0x1000
173#define MAL_TX_CTRL_INTR 0x0400
31 174
32struct mal_commac_ops { 175struct mal_commac_ops {
33 void (*txeob) (void *dev, u32 chanmask); 176 void (*poll_tx) (void *dev);
34 void (*txde) (void *dev, u32 chanmask); 177 int (*poll_rx) (void *dev, int budget);
35 void (*rxeob) (void *dev, u32 chanmask); 178 int (*peek_rx) (void *dev);
36 void (*rxde) (void *dev, u32 chanmask); 179 void (*rxde) (void *dev);
37}; 180};
38 181
39struct mal_commac { 182struct mal_commac {
40 struct mal_commac_ops *ops; 183 struct mal_commac_ops *ops;
41 void *dev; 184 void *dev;
42 u32 tx_chan_mask, rx_chan_mask; 185 struct list_head poll_list;
43 struct list_head list; 186 int rx_stopped;
187
188 u32 tx_chan_mask;
189 u32 rx_chan_mask;
190 struct list_head list;
44}; 191};
45 192
46struct ibm_ocp_mal { 193struct ibm_ocp_mal {
47 int dcrbase; 194 int dcrbase;
48 195
49 struct list_head commac; 196 struct list_head poll_list;
50 u32 tx_chan_mask, rx_chan_mask; 197 struct net_device poll_dev;
51 198
52 dma_addr_t tx_phys_addr; 199 struct list_head list;
53 struct mal_descriptor *tx_virt_addr; 200 u32 tx_chan_mask;
201 u32 rx_chan_mask;
54 202
55 dma_addr_t rx_phys_addr; 203 dma_addr_t bd_dma;
56 struct mal_descriptor *rx_virt_addr; 204 struct mal_descriptor *bd_virt;
57};
58 205
59#define GET_MAL_STANZA(base,dcrn) \ 206 struct ocp_def *def;
60 case base: \ 207};
61 x = mfdcr(dcrn(base)); \
62 break;
63
64#define SET_MAL_STANZA(base,dcrn, val) \
65 case base: \
66 mtdcr(dcrn(base), (val)); \
67 break;
68
69#define GET_MAL0_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL_BASE,dcrn)
70#define SET_MAL0_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL_BASE,dcrn,val)
71
72#ifdef DCRN_MAL1_BASE
73#define GET_MAL1_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL1_BASE,dcrn)
74#define SET_MAL1_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL1_BASE,dcrn,val)
75#else /* ! DCRN_MAL1_BASE */
76#define GET_MAL1_STANZA(dcrn)
77#define SET_MAL1_STANZA(dcrn,val)
78#endif
79 208
80#define get_mal_dcrn(mal, dcrn) ({ \ 209static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg)
81 u32 x; \
82 switch ((mal)->dcrbase) { \
83 GET_MAL0_STANZA(dcrn) \
84 GET_MAL1_STANZA(dcrn) \
85 default: \
86 x = 0; \
87 BUG(); \
88 } \
89x; })
90
91#define set_mal_dcrn(mal, dcrn, val) do { \
92 switch ((mal)->dcrbase) { \
93 SET_MAL0_STANZA(dcrn,val) \
94 SET_MAL1_STANZA(dcrn,val) \
95 default: \
96 BUG(); \
97 } } while (0)
98
99static inline void mal_enable_tx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
100{ 210{
101 set_mal_dcrn(mal, DCRN_MALTXCASR, 211 return mfdcr(mal->dcrbase + reg);
102 get_mal_dcrn(mal, DCRN_MALTXCASR) | chanmask);
103} 212}
104 213
105static inline void mal_disable_tx_channels(struct ibm_ocp_mal *mal, 214static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val)
106 u32 chanmask)
107{ 215{
108 set_mal_dcrn(mal, DCRN_MALTXCARR, chanmask); 216 mtdcr(mal->dcrbase + reg, val);
109} 217}
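Compared with the old GET/SET_MAL_STANZA switch macros, register access is now plain offset arithmetic from the per-instance DCR base. For example (0x180, the customary MAL0 base on 405GP-class chips, is only illustrative):

	/* With mal->dcrbase == 0x180, MAL_ESR (0x01) maps to DCR 0x181 */
	u32 esr = get_mal_dcrn(mal, MAL_ESR);

	/* Enable TX channel 0 by read-modify-write of MAL_TXCASR */
	set_mal_dcrn(mal, MAL_TXCASR,
		     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(0));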
110 218
111static inline void mal_enable_rx_channels(struct ibm_ocp_mal *mal, u32 chanmask) 219/* Register MAL devices */
112{ 220int mal_init(void) __init;
113 set_mal_dcrn(mal, DCRN_MALRXCASR, 221void mal_exit(void) __exit;
114 get_mal_dcrn(mal, DCRN_MALRXCASR) | chanmask);
115}
116 222
117static inline void mal_disable_rx_channels(struct ibm_ocp_mal *mal, 223int mal_register_commac(struct ibm_ocp_mal *mal,
118 u32 chanmask) 224 struct mal_commac *commac) __init;
119{ 225void mal_unregister_commac(struct ibm_ocp_mal *mal,
120 set_mal_dcrn(mal, DCRN_MALRXCARR, chanmask); 226 struct mal_commac *commac) __exit;
121} 227int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size);
228
229/* Returns BD ring offset for a particular channel
230 (in 'struct mal_descriptor' elements)
231*/
232int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel);
233int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel);
234
235void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel);
236void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel);
237void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel);
238void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel);
122 239
123extern int mal_register_commac(struct ibm_ocp_mal *mal, 240/* Add/remove EMAC to/from MAL polling list */
124 struct mal_commac *commac); 241void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac);
125extern int mal_unregister_commac(struct ibm_ocp_mal *mal, 242void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac);
126 struct mal_commac *commac); 243
244/* Ethtool MAL registers */
245struct ibm_mal_regs {
246 u32 tx_count;
247 u32 rx_count;
248
249 u32 cfg;
250 u32 esr;
251 u32 ier;
252 u32 tx_casr;
253 u32 tx_carr;
254 u32 tx_eobisr;
255 u32 tx_deir;
256 u32 rx_casr;
257 u32 rx_carr;
258 u32 rx_eobisr;
259 u32 rx_deir;
260 u32 tx_ctpr[32];
261 u32 rx_ctpr[32];
262 u32 rcbs[32];
263};
127 264
128extern int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, 265int mal_get_regs_len(struct ibm_ocp_mal *mal);
129 unsigned long size); 266void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf);
130 267
131#endif /* _IBM_EMAC_MAL_H */ 268#endif /* __IBM_EMAC_MAL_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c
index 14213f090e91..67935dd33a65 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.c
+++ b/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -1,96 +1,80 @@
1/* 1/*
2 * ibm_ocp_phy.c 2 * drivers/net/ibm_emac/ibm_emac_phy.c
3 * 3 *
4 * PHY drivers for the ibm ocp ethernet driver. Borrowed 4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
5 * from sungem_phy.c, though I only kept the generic MII 5 * Borrowed from sungem_phy.c, though I only kept the generic MII
6 * driver for now. 6 * driver for now.
7 * 7 *
8 * This file should be shared with other drivers or eventually 8 * This file should be shared with other drivers or eventually
9 * merged as the "low level" part of miilib 9 * merged as the "low level" part of miilib
10 * 10 *
11 * (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org) 11 * (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org)
12 * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net>
12 * 13 *
13 */ 14 */
14
15#include <linux/config.h> 15#include <linux/config.h>
16
17#include <linux/module.h> 16#include <linux/module.h>
18
19#include <linux/kernel.h> 17#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/types.h> 18#include <linux/types.h>
22#include <linux/netdevice.h> 19#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/mii.h> 20#include <linux/mii.h>
25#include <linux/ethtool.h> 21#include <linux/ethtool.h>
26#include <linux/delay.h> 22#include <linux/delay.h>
27 23
24#include <asm/ocp.h>
25
28#include "ibm_emac_phy.h" 26#include "ibm_emac_phy.h"
29 27
30static int reset_one_mii_phy(struct mii_phy *phy, int phy_id) 28static inline int phy_read(struct mii_phy *phy, int reg)
29{
30 return phy->mdio_read(phy->dev, phy->address, reg);
31}
32
33static inline void phy_write(struct mii_phy *phy, int reg, int val)
31{ 34{
32 u16 val; 35 phy->mdio_write(phy->dev, phy->address, reg, val);
36}
37
38int mii_reset_phy(struct mii_phy *phy)
39{
40 int val;
33 int limit = 10000; 41 int limit = 10000;
34 42
35 val = __phy_read(phy, phy_id, MII_BMCR); 43 val = phy_read(phy, MII_BMCR);
36 val &= ~BMCR_ISOLATE; 44 val &= ~BMCR_ISOLATE;
37 val |= BMCR_RESET; 45 val |= BMCR_RESET;
38 __phy_write(phy, phy_id, MII_BMCR, val); 46 phy_write(phy, MII_BMCR, val);
39 47
40 udelay(100); 48 udelay(300);
41 49
42 while (limit--) { 50 while (limit--) {
43 val = __phy_read(phy, phy_id, MII_BMCR); 51 val = phy_read(phy, MII_BMCR);
44 if ((val & BMCR_RESET) == 0) 52 if (val >= 0 && (val & BMCR_RESET) == 0)
45 break; 53 break;
46 udelay(10); 54 udelay(10);
47 } 55 }
48 if ((val & BMCR_ISOLATE) && limit > 0) 56 if ((val & BMCR_ISOLATE) && limit > 0)
49 __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); 57 phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
50 58
51 return (limit <= 0); 59 return limit <= 0;
52}
53
54static int cis8201_init(struct mii_phy *phy)
55{
56 u16 epcr;
57
58 epcr = phy_read(phy, MII_CIS8201_EPCR);
59 epcr &= ~EPCR_MODE_MASK;
60
61 switch (phy->mode) {
62 case PHY_MODE_TBI:
63 epcr |= EPCR_TBI_MODE;
64 break;
65 case PHY_MODE_RTBI:
66 epcr |= EPCR_RTBI_MODE;
67 break;
68 case PHY_MODE_GMII:
69 epcr |= EPCR_GMII_MODE;
70 break;
71 case PHY_MODE_RGMII:
72 default:
73 epcr |= EPCR_RGMII_MODE;
74 }
75
76 phy_write(phy, MII_CIS8201_EPCR, epcr);
77
78 return 0;
79} 60}
80 61
81static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) 62static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
82{ 63{
83 u16 ctl, adv; 64 int ctl, adv;
84 65
85 phy->autoneg = 1; 66 phy->autoneg = AUTONEG_ENABLE;
86 phy->speed = SPEED_10; 67 phy->speed = SPEED_10;
87 phy->duplex = DUPLEX_HALF; 68 phy->duplex = DUPLEX_HALF;
88 phy->pause = 0; 69 phy->pause = phy->asym_pause = 0;
89 phy->advertising = advertise; 70 phy->advertising = advertise;
90 71
91 /* Setup standard advertise */ 72 /* Setup standard advertise */
92 adv = phy_read(phy, MII_ADVERTISE); 73 adv = phy_read(phy, MII_ADVERTISE);
93 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); 74 if (adv < 0)
75 return adv;
76 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
77 ADVERTISE_PAUSE_ASYM);
94 if (advertise & ADVERTISED_10baseT_Half) 78 if (advertise & ADVERTISED_10baseT_Half)
95 adv |= ADVERTISE_10HALF; 79 adv |= ADVERTISE_10HALF;
96 if (advertise & ADVERTISED_10baseT_Full) 80 if (advertise & ADVERTISED_10baseT_Full)
@@ -99,8 +83,25 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
99 adv |= ADVERTISE_100HALF; 83 adv |= ADVERTISE_100HALF;
100 if (advertise & ADVERTISED_100baseT_Full) 84 if (advertise & ADVERTISED_100baseT_Full)
101 adv |= ADVERTISE_100FULL; 85 adv |= ADVERTISE_100FULL;
86 if (advertise & ADVERTISED_Pause)
87 adv |= ADVERTISE_PAUSE_CAP;
88 if (advertise & ADVERTISED_Asym_Pause)
89 adv |= ADVERTISE_PAUSE_ASYM;
102 phy_write(phy, MII_ADVERTISE, adv); 90 phy_write(phy, MII_ADVERTISE, adv);
103 91
92 if (phy->features &
93 (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
94 adv = phy_read(phy, MII_CTRL1000);
95 if (adv < 0)
96 return adv;
97 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
98 if (advertise & ADVERTISED_1000baseT_Full)
99 adv |= ADVERTISE_1000FULL;
100 if (advertise & ADVERTISED_1000baseT_Half)
101 adv |= ADVERTISE_1000HALF;
102 phy_write(phy, MII_CTRL1000, adv);
103 }
104
104 /* Start/Restart aneg */ 105 /* Start/Restart aneg */
105 ctl = phy_read(phy, MII_BMCR); 106 ctl = phy_read(phy, MII_BMCR);
106 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); 107 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
@@ -111,14 +112,16 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
111 112
112static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) 113static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
113{ 114{
114 u16 ctl; 115 int ctl;
115 116
116 phy->autoneg = 0; 117 phy->autoneg = AUTONEG_DISABLE;
117 phy->speed = speed; 118 phy->speed = speed;
118 phy->duplex = fd; 119 phy->duplex = fd;
119 phy->pause = 0; 120 phy->pause = phy->asym_pause = 0;
120 121
121 ctl = phy_read(phy, MII_BMCR); 122 ctl = phy_read(phy, MII_BMCR);
123 if (ctl < 0)
124 return ctl;
122 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE); 125 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
123 126
124 /* First reset the PHY */ 127 /* First reset the PHY */
@@ -132,6 +135,8 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
132 ctl |= BMCR_SPEED100; 135 ctl |= BMCR_SPEED100;
133 break; 136 break;
134 case SPEED_1000: 137 case SPEED_1000:
138 ctl |= BMCR_SPEED1000;
139 break;
135 default: 140 default:
136 return -EINVAL; 141 return -EINVAL;
137 } 142 }
@@ -144,112 +149,155 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
144 149
145static int genmii_poll_link(struct mii_phy *phy) 150static int genmii_poll_link(struct mii_phy *phy)
146{ 151{
147 u16 status; 152 int status;
148 153
149 (void)phy_read(phy, MII_BMSR); 154 /* Clear latched value with dummy read */
155 phy_read(phy, MII_BMSR);
150 status = phy_read(phy, MII_BMSR); 156 status = phy_read(phy, MII_BMSR);
151 if ((status & BMSR_LSTATUS) == 0) 157 if (status < 0 || (status & BMSR_LSTATUS) == 0)
152 return 0; 158 return 0;
153 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) 159 if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE))
154 return 0; 160 return 0;
155 return 1; 161 return 1;
156} 162}
157 163
158#define MII_CIS8201_ACSR 0x1c 164static int genmii_read_link(struct mii_phy *phy)
159#define ACSR_DUPLEX_STATUS 0x0020
160#define ACSR_SPEED_1000BASET 0x0010
161#define ACSR_SPEED_100BASET 0x0008
162
163static int cis8201_read_link(struct mii_phy *phy)
164{ 165{
165 u16 acsr; 166 if (phy->autoneg == AUTONEG_ENABLE) {
167 int glpa = 0;
168 int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE);
169 if (lpa < 0)
170 return lpa;
171
172 if (phy->features &
173 (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
174 int adv = phy_read(phy, MII_CTRL1000);
175 glpa = phy_read(phy, MII_STAT1000);
166 176
167 if (phy->autoneg) { 177 if (glpa < 0 || adv < 0)
168 acsr = phy_read(phy, MII_CIS8201_ACSR); 178 return adv;
169 179
170 if (acsr & ACSR_DUPLEX_STATUS) 180 glpa &= adv << 2;
181 }
182
183 phy->speed = SPEED_10;
184 phy->duplex = DUPLEX_HALF;
185 phy->pause = phy->asym_pause = 0;
186
187 if (glpa & (LPA_1000FULL | LPA_1000HALF)) {
188 phy->speed = SPEED_1000;
189 if (glpa & LPA_1000FULL)
190 phy->duplex = DUPLEX_FULL;
191 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
192 phy->speed = SPEED_100;
193 if (lpa & LPA_100FULL)
194 phy->duplex = DUPLEX_FULL;
195 } else if (lpa & LPA_10FULL)
196 phy->duplex = DUPLEX_FULL;
197
198 if (phy->duplex == DUPLEX_FULL) {
199 phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
200 phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
201 }
202 } else {
203 int bmcr = phy_read(phy, MII_BMCR);
204 if (bmcr < 0)
205 return bmcr;
206
207 if (bmcr & BMCR_FULLDPLX)
171 phy->duplex = DUPLEX_FULL; 208 phy->duplex = DUPLEX_FULL;
172 else 209 else
173 phy->duplex = DUPLEX_HALF; 210 phy->duplex = DUPLEX_HALF;
174 if (acsr & ACSR_SPEED_1000BASET) { 211 if (bmcr & BMCR_SPEED1000)
175 phy->speed = SPEED_1000; 212 phy->speed = SPEED_1000;
176 } else if (acsr & ACSR_SPEED_100BASET) 213 else if (bmcr & BMCR_SPEED100)
177 phy->speed = SPEED_100; 214 phy->speed = SPEED_100;
178 else 215 else
179 phy->speed = SPEED_10; 216 phy->speed = SPEED_10;
180 phy->pause = 0;
181 }
182 /* On non-aneg, we assume what we put in BMCR is the speed,
183 * though magic-aneg shouldn't prevent this case from occurring
184 */
185 217
218 phy->pause = phy->asym_pause = 0;
219 }
186 return 0; 220 return 0;
187} 221}
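The glpa &= adv << 2 step in genmii_read_link() works because the link-partner ability bits in MII_STAT1000 sit exactly two bit positions above the matching advertisement bits in MII_CTRL1000:

/* MII_CTRL1000: ADVERTISE_1000HALF = 0x0100, ADVERTISE_1000FULL = 0x0200
 * MII_STAT1000: LPA_1000HALF       = 0x0400, LPA_1000FULL       = 0x0800
 * so (adv << 2) lines what we advertised up with the partner's bits,
 * mirroring the (MII_LPA & MII_ADVERTISE) intersection used for 10/100.
 */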
188 222
189static int genmii_read_link(struct mii_phy *phy) 223/* Generic implementation for most 10/100/1000 PHYs */
224static struct mii_phy_ops generic_phy_ops = {
225 .setup_aneg = genmii_setup_aneg,
226 .setup_forced = genmii_setup_forced,
227 .poll_link = genmii_poll_link,
228 .read_link = genmii_read_link
229};
230
231static struct mii_phy_def genmii_phy_def = {
232 .phy_id = 0x00000000,
233 .phy_id_mask = 0x00000000,
234 .name = "Generic MII",
235 .ops = &generic_phy_ops
236};
237
238/* CIS8201 */
239#define MII_CIS8201_10BTCSR 0x16
240#define TENBTCSR_ECHO_DISABLE 0x2000
241#define MII_CIS8201_EPCR 0x17
242#define EPCR_MODE_MASK 0x3000
243#define EPCR_GMII_MODE 0x0000
244#define EPCR_RGMII_MODE 0x1000
245#define EPCR_TBI_MODE 0x2000
246#define EPCR_RTBI_MODE 0x3000
247#define MII_CIS8201_ACSR 0x1c
248#define ACSR_PIN_PRIO_SELECT 0x0004
249
250static int cis8201_init(struct mii_phy *phy)
190{ 251{
191 u16 lpa; 252 int epcr;
192 253
193 if (phy->autoneg) { 254 epcr = phy_read(phy, MII_CIS8201_EPCR);
194 lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE); 255 if (epcr < 0)
256 return epcr;
195 257
196 phy->speed = SPEED_10; 258 epcr &= ~EPCR_MODE_MASK;
197 phy->duplex = DUPLEX_HALF;
198 phy->pause = 0;
199 259
200 if (lpa & (LPA_100FULL | LPA_100HALF)) { 260 switch (phy->mode) {
201 phy->speed = SPEED_100; 261 case PHY_MODE_TBI:
202 if (lpa & LPA_100FULL) 262 epcr |= EPCR_TBI_MODE;
203 phy->duplex = DUPLEX_FULL; 263 break;
204 } else if (lpa & LPA_10FULL) 264 case PHY_MODE_RTBI:
205 phy->duplex = DUPLEX_FULL; 265 epcr |= EPCR_RTBI_MODE;
266 break;
267 case PHY_MODE_GMII:
268 epcr |= EPCR_GMII_MODE;
269 break;
270 case PHY_MODE_RGMII:
271 default:
272 epcr |= EPCR_RGMII_MODE;
206 } 273 }
207 /* On non-aneg, we assume what we put in BMCR is the speed, 274
208 * though magic-aneg shouldn't prevent this case from occurring 275 phy_write(phy, MII_CIS8201_EPCR, epcr);
209 */ 276
277 /* MII regs override strap pins */
278 phy_write(phy, MII_CIS8201_ACSR,
279 phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT);
280
281 /* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */
282 phy_write(phy, MII_CIS8201_10BTCSR,
283 phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE);
210 284
211 return 0; 285 return 0;
212} 286}
213 287
214#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
215 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
216 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII)
217#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
218 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
219
220/* CIS8201 phy ops */
221static struct mii_phy_ops cis8201_phy_ops = { 288static struct mii_phy_ops cis8201_phy_ops = {
222 init:cis8201_init, 289 .init = cis8201_init,
223 setup_aneg:genmii_setup_aneg, 290 .setup_aneg = genmii_setup_aneg,
224 setup_forced:genmii_setup_forced, 291 .setup_forced = genmii_setup_forced,
225 poll_link:genmii_poll_link, 292 .poll_link = genmii_poll_link,
226 read_link:cis8201_read_link 293 .read_link = genmii_read_link
227};
228
229/* Generic implementation for most 10/100 PHYs */
230static struct mii_phy_ops generic_phy_ops = {
231 setup_aneg:genmii_setup_aneg,
232 setup_forced:genmii_setup_forced,
233 poll_link:genmii_poll_link,
234 read_link:genmii_read_link
235}; 294};
236 295
237static struct mii_phy_def cis8201_phy_def = { 296static struct mii_phy_def cis8201_phy_def = {
238 phy_id:0x000fc410, 297 .phy_id = 0x000fc410,
239 phy_id_mask:0x000ffff0, 298 .phy_id_mask = 0x000ffff0,
240 name:"CIS8201 Gigabit Ethernet", 299 .name = "CIS8201 Gigabit Ethernet",
241 features:MII_GBIT_FEATURES, 300 .ops = &cis8201_phy_ops
242 magic_aneg:0,
243 ops:&cis8201_phy_ops
244};
245
246static struct mii_phy_def genmii_phy_def = {
247 phy_id:0x00000000,
248 phy_id_mask:0x00000000,
249 name:"Generic MII",
250 features:MII_BASIC_FEATURES,
251 magic_aneg:0,
252 ops:&generic_phy_ops
253}; 301};
254 302
255static struct mii_phy_def *mii_phy_table[] = { 303static struct mii_phy_def *mii_phy_table[] = {
@@ -258,39 +306,60 @@ static struct mii_phy_def *mii_phy_table[] = {
258 NULL 306 NULL
259}; 307};
260 308
261int mii_phy_probe(struct mii_phy *phy, int mii_id) 309int mii_phy_probe(struct mii_phy *phy, int address)
262{ 310{
263 int rc;
264 u32 id;
265 struct mii_phy_def *def; 311 struct mii_phy_def *def;
266 int i; 312 int i;
313 u32 id;
267 314
268 phy->autoneg = 0; 315 phy->autoneg = AUTONEG_DISABLE;
269 phy->advertising = 0; 316 phy->advertising = 0;
270 phy->mii_id = mii_id; 317 phy->address = address;
271 phy->speed = 0; 318 phy->speed = SPEED_10;
272 phy->duplex = 0; 319 phy->duplex = DUPLEX_HALF;
273 phy->pause = 0; 320 phy->pause = phy->asym_pause = 0;
274 321
275 /* Take PHY out of isloate mode and reset it. */ 322 /* Take PHY out of isolate mode and reset it. */
276 rc = reset_one_mii_phy(phy, mii_id); 323 if (mii_reset_phy(phy))
277 if (rc)
278 return -ENODEV; 324 return -ENODEV;
279 325
280 /* Read ID and find matching entry */ 326 /* Read ID and find matching entry */
281 id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)) 327 id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
282 & 0xfffffff0;
283 for (i = 0; (def = mii_phy_table[i]) != NULL; i++) 328 for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
284 if ((id & def->phy_id_mask) == def->phy_id) 329 if ((id & def->phy_id_mask) == def->phy_id)
285 break; 330 break;
286 /* Should never be NULL (we have a generic entry), but... */ 331 /* Should never be NULL (we have a generic entry), but... */
287 if (def == NULL) 332 if (!def)
288 return -ENODEV; 333 return -ENODEV;
289 334
290 phy->def = def; 335 phy->def = def;
291 336
337 /* Determine PHY features if needed */
338 phy->features = def->features;
339 if (!phy->features) {
340 u16 bmsr = phy_read(phy, MII_BMSR);
341 if (bmsr & BMSR_ANEGCAPABLE)
342 phy->features |= SUPPORTED_Autoneg;
343 if (bmsr & BMSR_10HALF)
344 phy->features |= SUPPORTED_10baseT_Half;
345 if (bmsr & BMSR_10FULL)
346 phy->features |= SUPPORTED_10baseT_Full;
347 if (bmsr & BMSR_100HALF)
348 phy->features |= SUPPORTED_100baseT_Half;
349 if (bmsr & BMSR_100FULL)
350 phy->features |= SUPPORTED_100baseT_Full;
351 if (bmsr & BMSR_ESTATEN) {
352 u16 esr = phy_read(phy, MII_ESTATUS);
353 if (esr & ESTATUS_1000_TFULL)
354 phy->features |= SUPPORTED_1000baseT_Full;
355 if (esr & ESTATUS_1000_THALF)
356 phy->features |= SUPPORTED_1000baseT_Half;
357 }
358 phy->features |= SUPPORTED_MII;
359 }
360
292 /* Setup default advertising */ 361 /* Setup default advertising */
293 phy->advertising = def->features; 362 phy->advertising = phy->features;
294 363
295 return 0; 364 return 0;
296} 365}
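As the header below documents, a caller fills in dev plus the two MDIO accessors before probing. A minimal usage sketch (emac_mdio_read/emac_mdio_write stand in for the host driver's real accessors, and PHY_MODE_RGMII is assumed to come from the core header):

	struct mii_phy phy;

	phy.dev = ndev;				/* host net_device */
	phy.mdio_read = emac_mdio_read;		/* illustrative names */
	phy.mdio_write = emac_mdio_write;
	phy.mode = PHY_MODE_RGMII;

	if (mii_phy_probe(&phy, phy_address))	/* fills def, features, ... */
		return -ENXIO;			/* no PHY at this address */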
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.h b/drivers/net/ibm_emac/ibm_emac_phy.h
index 61afbea96563..a70e0fea54c4 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.h
+++ b/drivers/net/ibm_emac/ibm_emac_phy.h
@@ -1,65 +1,25 @@
1
2/* 1/*
3 * ibm_emac_phy.h 2 * drivers/net/ibm_emac/ibm_emac_phy.h
4 *
5 * 3 *
6 * Benjamin Herrenschmidt <benh@kernel.crashing.org> 4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support
7 * February 2003
8 * 5 *
9 * This program is free software; you can redistribute it and/or modify it 6 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 * under the terms of the GNU General Public License as published by the 7 * February 2003
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 * 8 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 9 * Minor additions by Eugene Surovegin <ebs@ebshome.net>, 2004
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
20 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA.
28 * 10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
29 * 15 *
30 * This file basically duplicates sungem_phy.{c,h} with different PHYs 16 * This file basically duplicates sungem_phy.{c,h} with different PHYs
31 * supported. I'm looking into merging that in a single mii layer more 17 * supported. I'm looking into merging that in a single mii layer more
32 * flexible than mii.c 18 * flexible than mii.c
33 */ 19 */
34 20
35#ifndef _IBM_EMAC_PHY_H_ 21#ifndef _IBM_OCP_PHY_H_
36#define _IBM_EMAC_PHY_H_ 22#define _IBM_OCP_PHY_H_
37
38/*
39 * PHY mode settings
40 * Used for multi-mode capable PHYs
41 */
42#define PHY_MODE_NA 0
43#define PHY_MODE_MII 1
44#define PHY_MODE_RMII 2
45#define PHY_MODE_SMII 3
46#define PHY_MODE_RGMII 4
47#define PHY_MODE_TBI 5
48#define PHY_MODE_GMII 6
49#define PHY_MODE_RTBI 7
50#define PHY_MODE_SGMII 8
51
52/*
53 * PHY specific registers/values
54 */
55
56/* CIS8201 */
57#define MII_CIS8201_EPCR 0x17
58#define EPCR_MODE_MASK 0x3000
59#define EPCR_GMII_MODE 0x0000
60#define EPCR_RGMII_MODE 0x1000
61#define EPCR_TBI_MODE 0x2000
62#define EPCR_RTBI_MODE 0x3000
63 23
64struct mii_phy; 24struct mii_phy;
65 25
@@ -77,7 +37,8 @@ struct mii_phy_ops {
77struct mii_phy_def { 37struct mii_phy_def {
78 u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ 38 u32 phy_id; /* Concatenated ID1 << 16 | ID2 */
79 u32 phy_id_mask; /* Significant bits */ 39 u32 phy_id_mask; /* Significant bits */
80 u32 features; /* Ethtool SUPPORTED_* defines */ 40 u32 features; /* Ethtool SUPPORTED_* defines or
41 0 for autodetect */
81 int magic_aneg; /* Autoneg does all speed test for us */ 42 int magic_aneg; /* Autoneg does all speed test for us */
82 const char *name; 43 const char *name;
83 const struct mii_phy_ops *ops; 44 const struct mii_phy_ops *ops;
@@ -86,8 +47,11 @@ struct mii_phy_def {
86/* An instance of a PHY, partially borrowed from mii_if_info */ 47/* An instance of a PHY, partially borrowed from mii_if_info */
87struct mii_phy { 48struct mii_phy {
88 struct mii_phy_def *def; 49 struct mii_phy_def *def;
89 int advertising; 50 u32 advertising; /* Ethtool ADVERTISED_* defines */
90 int mii_id; 51 u32 features; /* Copied from mii_phy_def.features
52 or determined automaticaly */
53 int address; /* PHY address */
54 int mode; /* PHY mode */
91 55
92 /* 1: autoneg enabled, 0: disabled */ 56 /* 1: autoneg enabled, 0: disabled */
93 int autoneg; 57 int autoneg;
@@ -98,40 +62,19 @@ struct mii_phy {
98 int speed; 62 int speed;
99 int duplex; 63 int duplex;
100 int pause; 64 int pause;
101 65 int asym_pause;
102 /* PHY mode - if needed */
103 int mode;
104 66
105 /* Provided by host chip */ 67 /* Provided by host chip */
106 struct net_device *dev; 68 struct net_device *dev;
107 int (*mdio_read) (struct net_device * dev, int mii_id, int reg); 69 int (*mdio_read) (struct net_device * dev, int addr, int reg);
108 void (*mdio_write) (struct net_device * dev, int mii_id, int reg, 70 void (*mdio_write) (struct net_device * dev, int addr, int reg,
109 int val); 71 int val);
110}; 72};
111 73
112/* Pass in a struct mii_phy with dev, mdio_read and mdio_write 74/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
113 * filled; the remaining fields will be filled on return 75 * filled; the remaining fields will be filled on return
114 */ 76 */
115extern int mii_phy_probe(struct mii_phy *phy, int mii_id); 77int mii_phy_probe(struct mii_phy *phy, int address);
116 78int mii_reset_phy(struct mii_phy *phy);
117static inline int __phy_read(struct mii_phy *phy, int id, int reg)
118{
119 return phy->mdio_read(phy->dev, id, reg);
120}
121
122static inline void __phy_write(struct mii_phy *phy, int id, int reg, int val)
123{
124 phy->mdio_write(phy->dev, id, reg, val);
125}
126
127static inline int phy_read(struct mii_phy *phy, int reg)
128{
129 return phy->mdio_read(phy->dev, phy->mii_id, reg);
130}
131
132static inline void phy_write(struct mii_phy *phy, int reg, int val)
133{
134 phy->mdio_write(phy->dev, phy->mii_id, reg, val);
135}
136 79
137#endif /* _IBM_EMAC_PHY_H_ */ 80#endif /* _IBM_OCP_PHY_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.c b/drivers/net/ibm_emac/ibm_emac_rgmii.c
new file mode 100644
index 000000000000..f0b1ffb2dbbf
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.c
@@ -0,0 +1,201 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_rgmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * Copyright 2004 MontaVista Software, Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/ethtool.h>
22#include <asm/io.h>
23
24#include "ibm_emac_core.h"
25#include "ibm_emac_debug.h"
26
27/* RGMIIx_FER */
28#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4))
29#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4))
30#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4))
31#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4))
32#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4))
33
34/* RGMIIx_SSR */
35#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
36#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
37#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
38
39/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
40static inline int rgmii_valid_mode(int phy_mode)
41{
42 return phy_mode == PHY_MODE_GMII ||
43 phy_mode == PHY_MODE_RGMII ||
44 phy_mode == PHY_MODE_TBI ||
45 phy_mode == PHY_MODE_RTBI;
46}
47
48static inline const char *rgmii_mode_name(int mode)
49{
50 switch (mode) {
51 case PHY_MODE_RGMII:
52 return "RGMII";
53 case PHY_MODE_TBI:
54 return "TBI";
55 case PHY_MODE_GMII:
56 return "GMII";
57 case PHY_MODE_RTBI:
58 return "RTBI";
59 default:
60 BUG();
61 }
62}
63
64static inline u32 rgmii_mode_mask(int mode, int input)
65{
66 switch (mode) {
67 case PHY_MODE_RGMII:
68 return RGMII_FER_RGMII(input);
69 case PHY_MODE_TBI:
70 return RGMII_FER_TBI(input);
71 case PHY_MODE_GMII:
72 return RGMII_FER_GMII(input);
73 case PHY_MODE_RTBI:
74 return RGMII_FER_RTBI(input);
75 default:
76 BUG();
77 }
78}
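Each RGMII input owns one 4-bit nibble of the FER, so the per-input masks compose by shifting. Two worked values:

/* rgmii_mode_mask(PHY_MODE_RGMII, 0) == 0x5 << 0 == 0x00000005
 * rgmii_mode_mask(PHY_MODE_TBI,   2) == 0x6 << 8 == 0x00000600
 * rgmii_init() ORs the mask into FER; __rgmii_fini() later clears the
 * whole nibble again with ~RGMII_FER_MASK(input).
 */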
79
80static int __init rgmii_init(struct ocp_device *ocpdev, int input, int mode)
81{
82 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
83 struct rgmii_regs *p;
84
85 RGMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, mode);
86
87 if (!dev) {
88 dev = kzalloc(sizeof(struct ibm_ocp_rgmii), GFP_KERNEL);
89 if (!dev) {
90 printk(KERN_ERR
91 "rgmii%d: couldn't allocate device structure!\n",
92 ocpdev->def->index);
93 return -ENOMEM;
94 }
95
96 p = (struct rgmii_regs *)ioremap(ocpdev->def->paddr,
97 sizeof(struct rgmii_regs));
98 if (!p) {
99 printk(KERN_ERR
100 "rgmii%d: could not ioremap device registers!\n",
101 ocpdev->def->index);
102 kfree(dev);
103 return -ENOMEM;
104 }
105
106 dev->base = p;
107 ocp_set_drvdata(ocpdev, dev);
108
109 /* Disable all inputs by default */
110 out_be32(&p->fer, 0);
111 } else
112 p = dev->base;
113
114 /* Enable this input */
115 out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
116
117 printk(KERN_NOTICE "rgmii%d: input %d in %s mode\n",
118 ocpdev->def->index, input, rgmii_mode_name(mode));
119
120 ++dev->users;
121 return 0;
122}
123
124int __init rgmii_attach(void *emac)
125{
126 struct ocp_enet_private *dev = emac;
127 struct ocp_func_emac_data *emacdata = dev->def->additions;
128
129 /* Check if we need to attach to a RGMII */
130 if (emacdata->rgmii_idx >= 0 && rgmii_valid_mode(emacdata->phy_mode)) {
131 dev->rgmii_input = emacdata->rgmii_mux;
132 dev->rgmii_dev =
133 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_RGMII,
134 emacdata->rgmii_idx);
135 if (!dev->rgmii_dev) {
136 printk(KERN_ERR "emac%d: unknown rgmii%d!\n",
137 dev->def->index, emacdata->rgmii_idx);
138 return -ENODEV;
139 }
140 if (rgmii_init
141 (dev->rgmii_dev, dev->rgmii_input, emacdata->phy_mode)) {
142 printk(KERN_ERR
143 "emac%d: rgmii%d initialization failed!\n",
144 dev->def->index, emacdata->rgmii_idx);
145 return -ENODEV;
146 }
147 }
148 return 0;
149}
150
151void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
152{
153 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
154 u32 ssr = in_be32(&dev->base->ssr) & ~RGMII_SSR_MASK(input);
155
156 RGMII_DBG("%d: speed(%d, %d)" NL, ocpdev->def->index, input, speed);
157
158 if (speed == SPEED_1000)
159 ssr |= RGMII_SSR_1000(input);
160 else if (speed == SPEED_100)
161 ssr |= RGMII_SSR_100(input);
162
163 out_be32(&dev->base->ssr, ssr);
164}
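The SSR uses one byte per input, so for input 1 the update above resolves to:

/* SPEED_1000 -> ssr |= RGMII_SSR_1000(1) == 0x4 << 8 == 0x00000400
 * SPEED_100  -> ssr |= RGMII_SSR_100(1)  == 0x2 << 8 == 0x00000200
 * SPEED_10   -> neither bit set; the input's byte was already cleared
 *               by the ~RGMII_SSR_MASK(1) read-modify-write.
 */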
165
166void __exit __rgmii_fini(struct ocp_device *ocpdev, int input)
167{
168 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
169 BUG_ON(!dev || dev->users == 0);
170
171 RGMII_DBG("%d: fini(%d)" NL, ocpdev->def->index, input);
172
173 /* Disable this input */
174 out_be32(&dev->base->fer,
175 in_be32(&dev->base->fer) & ~RGMII_FER_MASK(input));
176
177 if (!--dev->users) {
178 /* Free everything if this is the last user */
179 ocp_set_drvdata(ocpdev, NULL);
180 iounmap((void *)dev->base);
181 kfree(dev);
182 }
183}
184
185int __rgmii_get_regs_len(struct ocp_device *ocpdev)
186{
187 return sizeof(struct emac_ethtool_regs_subhdr) +
188 sizeof(struct rgmii_regs);
189}
190
191void *rgmii_dump_regs(struct ocp_device *ocpdev, void *buf)
192{
193 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
194 struct emac_ethtool_regs_subhdr *hdr = buf;
195 struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1);
196
197 hdr->version = 0;
198 hdr->index = ocpdev->def->index;
199 memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs));
200 return regs + 1;
201}
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h
index 49f188f4ea6e..a1ffb8a44fff 100644
--- a/drivers/net/ibm_emac/ibm_emac_rgmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Defines for the IBM RGMII bridge 2 * drivers/net/ibm_emac/ibm_emac_rgmii.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
3 * 5 *
4 * Based on ocp_zmii.h/ibm_emac_zmii.h 6 * Based on ocp_zmii.h/ibm_emac_zmii.h
5 * Armin Kuster akuster@mvista.com 7 * Armin Kuster akuster@mvista.com
@@ -7,6 +9,9 @@
7 * Copyright 2004 MontaVista Software, Inc. 9 * Copyright 2004 MontaVista Software, Inc.
8 * Matt Porter <mporter@kernel.crashing.org> 10 * Matt Porter <mporter@kernel.crashing.org>
9 * 11 *
12 * Copyright (c) 2004, 2005 Zultys Technologies.
13 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 *
10 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
@@ -19,47 +24,42 @@
19#include <linux/config.h> 24#include <linux/config.h>
20 25
21/* RGMII bridge */ 26/* RGMII bridge */
22typedef struct rgmii_regs { 27struct rgmii_regs {
23 u32 fer; /* Function enable register */ 28 u32 fer; /* Function enable register */
24 u32 ssr; /* Speed select register */ 29 u32 ssr; /* Speed select register */
25} rgmii_t; 30};
26
27#define RGMII_INPUTS 4
28 31
29/* RGMII device */ 32/* RGMII device */
30struct ibm_ocp_rgmii { 33struct ibm_ocp_rgmii {
31 struct rgmii_regs *base; 34 struct rgmii_regs *base;
32 int mode[RGMII_INPUTS];
33 int users; /* number of EMACs using this RGMII bridge */ 35 int users; /* number of EMACs using this RGMII bridge */
34}; 36};
35 37
36/* Functional Enable Reg */ 38#ifdef CONFIG_IBM_EMAC_RGMII
37#define RGMII_FER_MASK(x) (0x00000007 << (4*x)) 39int rgmii_attach(void *emac) __init;
38#define RGMII_RTBI 0x00000004
39#define RGMII_RGMII 0x00000005
40#define RGMII_TBI 0x00000006
41#define RGMII_GMII 0x00000007
42
43/* Speed Selection reg */
44 40
45#define RGMII_SP2_100 0x00000002 41void __rgmii_fini(struct ocp_device *ocpdev, int input) __exit;
46#define RGMII_SP2_1000 0x00000004 42static inline void rgmii_fini(struct ocp_device *ocpdev, int input)
47#define RGMII_SP3_100 0x00000200 43{
48#define RGMII_SP3_1000 0x00000400 44 if (ocpdev)
45 __rgmii_fini(ocpdev, input);
46}
49 47
50#define RGMII_MII2_SPDMASK 0x00000007 48void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed);
51#define RGMII_MII3_SPDMASK 0x00000700
52 49
53#define RGMII_MII2_100MB RGMII_SP2_100 & ~RGMII_SP2_1000 50int __rgmii_get_regs_len(struct ocp_device *ocpdev);
54#define RGMII_MII2_1000MB RGMII_SP2_1000 & ~RGMII_SP2_100 51static inline int rgmii_get_regs_len(struct ocp_device *ocpdev)
55#define RGMII_MII2_10MB ~(RGMII_SP2_100 | RGMII_SP2_1000) 52{
56#define RGMII_MII3_100MB RGMII_SP3_100 & ~RGMII_SP3_1000 53 return ocpdev ? __rgmii_get_regs_len(ocpdev) : 0;
57#define RGMII_MII3_1000MB RGMII_SP3_1000 & ~RGMII_SP3_100 54}
58#define RGMII_MII3_10MB ~(RGMII_SP3_100 | RGMII_SP3_1000)
59 55
60#define RTBI 0 56void *rgmii_dump_regs(struct ocp_device *ocpdev, void *buf);
61#define RGMII 1 57#else
62#define TBI 2 58# define rgmii_attach(x) 0
63#define GMII 3 59# define rgmii_fini(x,y) ((void)0)
60# define rgmii_set_speed(x,y,z) ((void)0)
61# define rgmii_get_regs_len(x) 0
62# define rgmii_dump_regs(x,buf) (buf)
63#endif /* !CONFIG_IBM_EMAC_RGMII */
64 64
65#endif /* _IBM_EMAC_RGMII_H_ */ 65#endif /* _IBM_EMAC_RGMII_H_ */
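
[Editor's note: the header rework above introduces a pattern that tah.h and zmii.h repeat below: NULL-tolerant inline wrappers when the bridge is configured in, and no-op macros when it is configured out, so callers in the EMAC core need no #ifdefs. A standalone sketch of the idiom; all names here are made up:]

	/* Pattern sketch, not kernel code: an optional helper that compiles away. */
	struct ex_bridge;			/* opaque to callers */

	#ifdef CONFIG_EX_BRIDGE
	void __ex_bridge_fini(struct ex_bridge *b);
	static inline void ex_bridge_fini(struct ex_bridge *b)
	{
		if (b)				/* tolerate "no bridge attached" */
			__ex_bridge_fini(b);
	}
	#else
	# define ex_bridge_fini(b)	((void)0)	/* no code generated */
	#endif
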
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.c b/drivers/net/ibm_emac/ibm_emac_tah.c
new file mode 100644
index 000000000000..af08afc22f9f
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_tah.c
@@ -0,0 +1,111 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_tah.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
5 *
6 * Copyright 2004 MontaVista Software, Inc.
7 * Matt Porter <mporter@kernel.crashing.org>
8 *
9 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#include <linux/config.h>
17#include <asm/io.h>
18
19#include "ibm_emac_core.h"
20
21static int __init tah_init(struct ocp_device *ocpdev)
22{
23 struct tah_regs *p;
24
25 if (ocp_get_drvdata(ocpdev)) {
26 printk(KERN_ERR "tah%d: already in use!\n", ocpdev->def->index);
27 return -EBUSY;
28 }
29
30 /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
31 p = (struct tah_regs *)ioremap(ocpdev->def->paddr, sizeof(*p));
32 if (!p) {
33 printk(KERN_ERR "tah%d: could not ioremap device registers!\n",
34 ocpdev->def->index);
35 return -ENOMEM;
36 }
37 ocp_set_drvdata(ocpdev, p);
38 __tah_reset(ocpdev);
39
40 return 0;
41}
42
43int __init tah_attach(void *emac)
44{
45 struct ocp_enet_private *dev = emac;
46 struct ocp_func_emac_data *emacdata = dev->def->additions;
47
48 /* Check if we need to attach to a TAH */
49 if (emacdata->tah_idx >= 0) {
50 dev->tah_dev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_TAH,
51 emacdata->tah_idx);
52 if (!dev->tah_dev) {
53 printk(KERN_ERR "emac%d: unknown tah%d!\n",
54 dev->def->index, emacdata->tah_idx);
55 return -ENODEV;
56 }
57 if (tah_init(dev->tah_dev)) {
58 printk(KERN_ERR
59 "emac%d: tah%d initialization failed!\n",
60 dev->def->index, emacdata->tah_idx);
61 return -ENODEV;
62 }
63 }
64 return 0;
65}
66
67void __exit __tah_fini(struct ocp_device *ocpdev)
68{
69 struct tah_regs *p = ocp_get_drvdata(ocpdev);
70 BUG_ON(!p);
71 ocp_set_drvdata(ocpdev, NULL);
72 iounmap((void *)p);
73}
74
75void __tah_reset(struct ocp_device *ocpdev)
76{
77 struct tah_regs *p = ocp_get_drvdata(ocpdev);
78 int n;
79
80 /* Reset TAH */
81 out_be32(&p->mr, TAH_MR_SR);
82 n = 100;
83 while ((in_be32(&p->mr) & TAH_MR_SR) && n)
84 --n;
85
86 if (unlikely(!n))
87 printk(KERN_ERR "tah%d: reset timeout\n", ocpdev->def->index);
88
89 /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
90 out_be32(&p->mr,
91 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
92 TAH_MR_DIG);
93}
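
[Editor's note: the reset loop above bounds the wait by iteration count rather than wall-clock time. A hedged generalization of the same bounded-poll shape; this helper is hypothetical and not part of the patch:]

	/* Sketch: poll a self-clearing bit with a bounded retry count. */
	static int ex_poll_bit_clear(u32 __iomem *reg, u32 bit, int tries)
	{
		while ((in_be32(reg) & bit) && tries)
			--tries;
		return tries ? 0 : -ETIMEDOUT;	/* 0 on success */
	}
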
94
95int __tah_get_regs_len(struct ocp_device *ocpdev)
96{
97 return sizeof(struct emac_ethtool_regs_subhdr) +
98 sizeof(struct tah_regs);
99}
100
101void *tah_dump_regs(struct ocp_device *ocpdev, void *buf)
102{
103 struct tah_regs *dev = ocp_get_drvdata(ocpdev);
104 struct emac_ethtool_regs_subhdr *hdr = buf;
105 struct tah_regs *regs = (struct tah_regs *)(hdr + 1);
106
107 hdr->version = 0;
108 hdr->index = ocpdev->def->index;
109 memcpy_fromio(regs, dev, sizeof(struct tah_regs));
110 return regs + 1;
111}
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h
index ecfc69805521..9299b5dd7eb1 100644
--- a/drivers/net/ibm_emac/ibm_emac_tah.h
+++ b/drivers/net/ibm_emac/ibm_emac_tah.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * Defines for the IBM TAH 2 * drivers/net/ibm_emac/ibm_emac_tah.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
3 * 5 *
4 * Copyright 2004 MontaVista Software, Inc. 6 * Copyright 2004 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 7 * Matt Porter <mporter@kernel.crashing.org>
6 * 8 *
9 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
10 *
7 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 12 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your 13 * Free Software Foundation; either version 2 of the License, or (at your
@@ -13,36 +17,72 @@
13#ifndef _IBM_EMAC_TAH_H 17#ifndef _IBM_EMAC_TAH_H
14#define _IBM_EMAC_TAH_H 18#define _IBM_EMAC_TAH_H
15 19
20#include <linux/config.h>
21#include <linux/init.h>
22#include <asm/ocp.h>
23
16/* TAH */ 24/* TAH */
17typedef struct tah_regs { 25struct tah_regs {
18 u32 tah_revid; 26 u32 revid;
19 u32 pad[3]; 27 u32 pad[3];
20 u32 tah_mr; 28 u32 mr;
21 u32 tah_ssr0; 29 u32 ssr0;
22 u32 tah_ssr1; 30 u32 ssr1;
23 u32 tah_ssr2; 31 u32 ssr2;
24 u32 tah_ssr3; 32 u32 ssr3;
25 u32 tah_ssr4; 33 u32 ssr4;
26 u32 tah_ssr5; 34 u32 ssr5;
27 u32 tah_tsr; 35 u32 tsr;
28} tah_t; 36};
29 37
30/* TAH engine */ 38/* TAH engine */
31#define TAH_MR_CVR 0x80000000 39#define TAH_MR_CVR 0x80000000
32#define TAH_MR_SR 0x40000000 40#define TAH_MR_SR 0x40000000
33#define TAH_MR_ST_256 0x01000000 41#define TAH_MR_ST_256 0x01000000
34#define TAH_MR_ST_512 0x02000000 42#define TAH_MR_ST_512 0x02000000
35#define TAH_MR_ST_768 0x03000000 43#define TAH_MR_ST_768 0x03000000
36#define TAH_MR_ST_1024 0x04000000 44#define TAH_MR_ST_1024 0x04000000
37#define TAH_MR_ST_1280 0x05000000 45#define TAH_MR_ST_1280 0x05000000
38#define TAH_MR_ST_1536 0x06000000 46#define TAH_MR_ST_1536 0x06000000
39#define TAH_MR_TFS_16KB 0x00000000 47#define TAH_MR_TFS_16KB 0x00000000
40#define TAH_MR_TFS_2KB 0x00200000 48#define TAH_MR_TFS_2KB 0x00200000
41#define TAH_MR_TFS_4KB 0x00400000 49#define TAH_MR_TFS_4KB 0x00400000
42#define TAH_MR_TFS_6KB 0x00600000 50#define TAH_MR_TFS_6KB 0x00600000
43#define TAH_MR_TFS_8KB 0x00800000 51#define TAH_MR_TFS_8KB 0x00800000
44#define TAH_MR_TFS_10KB 0x00a00000 52#define TAH_MR_TFS_10KB 0x00a00000
45#define TAH_MR_DTFP 0x00100000 53#define TAH_MR_DTFP 0x00100000
46#define TAH_MR_DIG 0x00080000 54#define TAH_MR_DIG 0x00080000
55
56#ifdef CONFIG_IBM_EMAC_TAH
57int tah_attach(void *emac) __init;
58
59void __tah_fini(struct ocp_device *ocpdev) __exit;
60static inline void tah_fini(struct ocp_device *ocpdev)
61{
62 if (ocpdev)
63 __tah_fini(ocpdev);
64}
65
66void __tah_reset(struct ocp_device *ocpdev);
67static inline void tah_reset(struct ocp_device *ocpdev)
68{
69 if (ocpdev)
70 __tah_reset(ocpdev);
71}
72
73int __tah_get_regs_len(struct ocp_device *ocpdev);
74static inline int tah_get_regs_len(struct ocp_device *ocpdev)
75{
76 return ocpdev ? __tah_get_regs_len(ocpdev) : 0;
77}
78
79void *tah_dump_regs(struct ocp_device *ocpdev, void *buf);
80#else
81# define tah_attach(x) 0
82# define tah_fini(x) ((void)0)
83# define tah_reset(x) ((void)0)
84# define tah_get_regs_len(x) 0
85# define tah_dump_regs(x,buf) (buf)
86#endif /* !CONFIG_IBM_EMAC_TAH */
47 87
48#endif /* _IBM_EMAC_TAH_H */ 88#endif /* _IBM_EMAC_TAH_H */
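
[Editor's note: for reference, the mode that __tah_reset() programs composes from the TAH_MR_* bits above, and the worked value can be checked at a glance. The check function is illustrative only:]

	/* CVR | ST_768 | TFS_10KB | DTFP | DIG
	 *   = 0x80000000 | 0x03000000 | 0x00a00000 | 0x00100000 | 0x00080000
	 *   = 0x83b80000 */
	static inline void ex_tah_mr_check(void)
	{
		BUG_ON((TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB |
			TAH_MR_DTFP | TAH_MR_DIG) != 0x83b80000);
	}
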
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.c b/drivers/net/ibm_emac/ibm_emac_zmii.c
new file mode 100644
index 000000000000..35c1185079ed
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.c
@@ -0,0 +1,255 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_zmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
11 * Copyright 2001 MontaVista Software Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/ethtool.h>
22#include <asm/io.h>
23
24#include "ibm_emac_core.h"
25#include "ibm_emac_debug.h"
26
27/* ZMIIx_FER */
28#define ZMII_FER_MDI(idx) (0x80000000 >> ((idx) * 4))
29#define ZMII_FER_MDI_ALL (ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \
30 ZMII_FER_MDI(2) | ZMII_FER_MDI(3))
31
32#define ZMII_FER_SMII(idx) (0x40000000 >> ((idx) * 4))
33#define ZMII_FER_RMII(idx) (0x20000000 >> ((idx) * 4))
34#define ZMII_FER_MII(idx) (0x10000000 >> ((idx) * 4))
35
36/* ZMIIx_SSR */
37#define ZMII_SSR_SCI(idx) (0x40000000 >> ((idx) * 4))
38#define ZMII_SSR_FSS(idx) (0x20000000 >> ((idx) * 4))
39#define ZMII_SSR_SP(idx) (0x10000000 >> ((idx) * 4))
40
41/* ZMII only supports MII, RMII and SMII
42 * we also support autodetection for backward compatibility
43 */
44static inline int zmii_valid_mode(int mode)
45{
46 return mode == PHY_MODE_MII ||
47 mode == PHY_MODE_RMII ||
48 mode == PHY_MODE_SMII ||
49 mode == PHY_MODE_NA;
50}
51
52static inline const char *zmii_mode_name(int mode)
53{
54 switch (mode) {
55 case PHY_MODE_MII:
56 return "MII";
57 case PHY_MODE_RMII:
58 return "RMII";
59 case PHY_MODE_SMII:
60 return "SMII";
61 default:
62 BUG();
63 }
64}
65
66static inline u32 zmii_mode_mask(int mode, int input)
67{
68 switch (mode) {
69 case PHY_MODE_MII:
70 return ZMII_FER_MII(input);
71 case PHY_MODE_RMII:
72 return ZMII_FER_RMII(input);
73 case PHY_MODE_SMII:
74 return ZMII_FER_SMII(input);
75 default:
76 return 0;
77 }
78}
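
[Editor's note: the FER packs one nibble per EMAC input (the MDI/SMII/RMII/MII bits shifted right by 4 * input), so for example ZMII_FER_MII(2) = 0x10000000 >> 8 = 0x00100000. An illustrative sanity check of that layout:]

	/* Illustrative check of the nibble-per-input layout defined above. */
	static inline void ex_zmii_fer_check(void)
	{
		BUG_ON(ZMII_FER_MII(2) != 0x00100000);	/* 0x10000000 >> 8  */
		BUG_ON(ZMII_FER_MDI(3) != 0x00080000);	/* 0x80000000 >> 12 */
	}
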
79
80static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode)
81{
82 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
83 struct zmii_regs *p;
84
85 ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode);
86
87 if (!dev) {
88 dev = kzalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
89 if (!dev) {
90 printk(KERN_ERR
91 "zmii%d: couldn't allocate device structure!\n",
92 ocpdev->def->index);
93 return -ENOMEM;
94 }
95 dev->mode = PHY_MODE_NA;
96
97 p = (struct zmii_regs *)ioremap(ocpdev->def->paddr,
98 sizeof(struct zmii_regs));
99 if (!p) {
100 printk(KERN_ERR
101 "zmii%d: could not ioremap device registers!\n",
102 ocpdev->def->index);
103 kfree(dev);
104 return -ENOMEM;
105 }
106 dev->base = p;
107 ocp_set_drvdata(ocpdev, dev);
108
109 /* We may need FER value for autodetection later */
110 dev->fer_save = in_be32(&p->fer);
111
112 /* Disable all inputs by default */
113 out_be32(&p->fer, 0);
114 } else
115 p = dev->base;
116
117 if (!zmii_valid_mode(*mode)) {
118 /* Probably an EMAC connected to RGMII,
119 * but it still may need ZMII for MDIO
120 */
121 goto out;
122 }
123
124 /* Autodetect ZMII mode if not specified.
125 * This is only for backward compatibility with the old driver.
126 * Please always specify PHY mode in your board port to avoid
127 * any surprises.
128 */
129 if (dev->mode == PHY_MODE_NA) {
130 if (*mode == PHY_MODE_NA) {
131 u32 r = dev->fer_save;
132
133 ZMII_DBG("%d: autodetecting mode, FER = 0x%08x" NL,
134 ocpdev->def->index, r);
135
136 if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
137 dev->mode = PHY_MODE_MII;
138 else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
139 dev->mode = PHY_MODE_RMII;
140 else
141 dev->mode = PHY_MODE_SMII;
142 } else
143 dev->mode = *mode;
144
145 printk(KERN_NOTICE "zmii%d: bridge in %s mode\n",
146 ocpdev->def->index, zmii_mode_name(dev->mode));
147 } else {
148 /* All inputs must use the same mode */
149 if (*mode != PHY_MODE_NA && *mode != dev->mode) {
150 printk(KERN_ERR
151 "zmii%d: invalid mode %d specified for input %d\n",
152 ocpdev->def->index, *mode, input);
153 return -EINVAL;
154 }
155 }
156
157 /* Report back correct PHY mode,
158 * it may be used during PHY initialization.
159 */
160 *mode = dev->mode;
161
162 /* Enable this input */
163 out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input));
164 out:
165 ++dev->users;
166 return 0;
167}
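
[Editor's note: zmii_init() takes the PHY mode by pointer because it is both an input (the board-requested mode) and an output (the mode actually selected, possibly autodetected), which zmii_attach() below passes straight from emacdata. A hedged usage sketch; zmii_ocpdev and input are hypothetical caller variables:]

	/* Usage sketch (hypothetical caller): mode is an in/out parameter. */
	int mode = PHY_MODE_NA;			/* ask the bridge to autodetect */

	if (!zmii_init(zmii_ocpdev, input, &mode))
		printk(KERN_DEBUG "PHY will run in ZMII mode %d\n", mode);
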
168
169int __init zmii_attach(void *emac)
170{
171 struct ocp_enet_private *dev = emac;
172 struct ocp_func_emac_data *emacdata = dev->def->additions;
173
174 if (emacdata->zmii_idx >= 0) {
175 dev->zmii_input = emacdata->zmii_mux;
176 dev->zmii_dev =
177 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_ZMII,
178 emacdata->zmii_idx);
179 if (!dev->zmii_dev) {
180 printk(KERN_ERR "emac%d: unknown zmii%d!\n",
181 dev->def->index, emacdata->zmii_idx);
182 return -ENODEV;
183 }
184 if (zmii_init
185 (dev->zmii_dev, dev->zmii_input, &emacdata->phy_mode)) {
186 printk(KERN_ERR
187 "emac%d: zmii%d initialization failed!\n",
188 dev->def->index, emacdata->zmii_idx);
189 return -ENODEV;
190 }
191 }
192 return 0;
193}
194
195void __zmii_enable_mdio(struct ocp_device *ocpdev, int input)
196{
197 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
198 u32 fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL;
199
200 ZMII_DBG2("%d: mdio(%d)" NL, ocpdev->def->index, input);
201
202 out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input));
203}
204
205void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
206{
207 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
208 u32 ssr = in_be32(&dev->base->ssr);
209
210 ZMII_DBG("%d: speed(%d, %d)" NL, ocpdev->def->index, input, speed);
211
212 if (speed == SPEED_100)
213 ssr |= ZMII_SSR_SP(input);
214 else
215 ssr &= ~ZMII_SSR_SP(input);
216
217 out_be32(&dev->base->ssr, ssr);
218}
219
220void __exit __zmii_fini(struct ocp_device *ocpdev, int input)
221{
222 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
223 BUG_ON(!dev || dev->users == 0);
224
225 ZMII_DBG("%d: fini(%d)" NL, ocpdev->def->index, input);
226
227 /* Disable this input */
228 out_be32(&dev->base->fer,
229 in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input));
230
231 if (!--dev->users) {
232 /* Free everything if this is the last user */
233 ocp_set_drvdata(ocpdev, NULL);
234 iounmap((void *)dev->base);
235 kfree(dev);
236 }
237}
238
239int __zmii_get_regs_len(struct ocp_device *ocpdev)
240{
241 return sizeof(struct emac_ethtool_regs_subhdr) +
242 sizeof(struct zmii_regs);
243}
244
245void *zmii_dump_regs(struct ocp_device *ocpdev, void *buf)
246{
247 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
248 struct emac_ethtool_regs_subhdr *hdr = buf;
249 struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1);
250
251 hdr->version = 0;
252 hdr->index = ocpdev->def->index;
253 memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs));
254 return regs + 1;
255}
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h
index 6f6cd2a39e38..0bb26062c0ad 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -1,23 +1,27 @@
1/* 1/*
2 * ocp_zmii.h 2 * drivers/net/ibm_emac/ibm_emac_zmii.h
3 * 3 *
4 * Defines for the IBM ZMII bridge 4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 * 5 *
6 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Dec, 2001 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 * 8 *
9 * Copyright 2001 MontaVista Software Inc. 9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
11 * Copyright 2001 MontaVista Software Inc.
10 * 12 *
11 * This program is free software; you can redistribute it and/or modify it 13 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 14 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your 15 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version. 16 * option) any later version.
17 *
15 */ 18 */
16
17#ifndef _IBM_EMAC_ZMII_H_ 19#ifndef _IBM_EMAC_ZMII_H_
18#define _IBM_EMAC_ZMII_H_ 20#define _IBM_EMAC_ZMII_H_
19 21
20#include <linux/config.h> 22#include <linux/config.h>
23#include <linux/init.h>
24#include <asm/ocp.h>
21 25
22/* ZMII bridge registers */ 26/* ZMII bridge registers */
23struct zmii_regs { 27struct zmii_regs {
@@ -26,68 +30,54 @@ struct zmii_regs {
26 u32 smiirs; /* SMII status reg */ 30 u32 smiirs; /* SMII status reg */
27}; 31};
28 32
29#define ZMII_INPUTS 4
30
31/* ZMII device */ 33/* ZMII device */
32struct ibm_ocp_zmii { 34struct ibm_ocp_zmii {
33 struct zmii_regs *base; 35 struct zmii_regs *base;
34 int mode[ZMII_INPUTS]; 36 int mode; /* subset of PHY_MODE_XXXX */
35 int users; /* number of EMACs using this ZMII bridge */ 37 int users; /* number of EMACs using this ZMII bridge */
38 u32 fer_save; /* FER value left by firmware */
36}; 39};
37 40
38/* Functional Enable Reg */ 41#ifdef CONFIG_IBM_EMAC_ZMII
39 42int zmii_attach(void *emac) __init;
40#define ZMII_FER_MASK(x) (0xf0000000 >> (4*x))
41
42#define ZMII_MDI0 0x80000000
43#define ZMII_SMII0 0x40000000
44#define ZMII_RMII0 0x20000000
45#define ZMII_MII0 0x10000000
46#define ZMII_MDI1 0x08000000
47#define ZMII_SMII1 0x04000000
48#define ZMII_RMII1 0x02000000
49#define ZMII_MII1 0x01000000
50#define ZMII_MDI2 0x00800000
51#define ZMII_SMII2 0x00400000
52#define ZMII_RMII2 0x00200000
53#define ZMII_MII2 0x00100000
54#define ZMII_MDI3 0x00080000
55#define ZMII_SMII3 0x00040000
56#define ZMII_RMII3 0x00020000
57#define ZMII_MII3 0x00010000
58 43
59/* Speed Selection reg */ 44void __zmii_fini(struct ocp_device *ocpdev, int input) __exit;
45static inline void zmii_fini(struct ocp_device *ocpdev, int input)
46{
47 if (ocpdev)
48 __zmii_fini(ocpdev, input);
49}
60 50
61#define ZMII_SCI0 0x40000000 51void __zmii_enable_mdio(struct ocp_device *ocpdev, int input);
62#define ZMII_FSS0 0x20000000 52static inline void zmii_enable_mdio(struct ocp_device *ocpdev, int input)
63#define ZMII_SP0 0x10000000 53{
64#define ZMII_SCI1 0x04000000 54 if (ocpdev)
65#define ZMII_FSS1 0x02000000 55 __zmii_enable_mdio(ocpdev, input);
66#define ZMII_SP1 0x01000000 56}
67#define ZMII_SCI2 0x00400000
68#define ZMII_FSS2 0x00200000
69#define ZMII_SP2 0x00100000
70#define ZMII_SCI3 0x00040000
71#define ZMII_FSS3 0x00020000
72#define ZMII_SP3 0x00010000
73 57
74#define ZMII_MII0_100MB ZMII_SP0 58void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed);
75#define ZMII_MII0_10MB ~ZMII_SP0 59static inline void zmii_set_speed(struct ocp_device *ocpdev, int input,
76#define ZMII_MII1_100MB ZMII_SP1 60 int speed)
77#define ZMII_MII1_10MB ~ZMII_SP1 61{
78#define ZMII_MII2_100MB ZMII_SP2 62 if (ocpdev)
79#define ZMII_MII2_10MB ~ZMII_SP2 63 __zmii_set_speed(ocpdev, input, speed);
80#define ZMII_MII3_100MB ZMII_SP3 64}
81#define ZMII_MII3_10MB ~ZMII_SP3
82 65
83/* SMII Status reg */ 66int __zmii_get_regs_len(struct ocp_device *ocpdev);
67static inline int zmii_get_regs_len(struct ocp_device *ocpdev)
68{
69 return ocpdev ? __zmii_get_regs_len(ocpdev) : 0;
70}
84 71
85#define ZMII_STS0 0xFF000000 /* EMAC0 smii status mask */ 72void *zmii_dump_regs(struct ocp_device *ocpdev, void *buf);
86#define ZMII_STS1 0x00FF0000 /* EMAC1 smii status mask */
87 73
88#define SMII 0 74#else
89#define RMII 1 75# define zmii_attach(x) 0
90#define MII 2 76# define zmii_fini(x,y) ((void)0)
91#define MDI 3 77# define zmii_enable_mdio(x,y) ((void)0)
78# define zmii_set_speed(x,y,z) ((void)0)
79# define zmii_get_regs_len(x) 0
80# define zmii_dump_regs(x,buf) (buf)
81#endif /* !CONFIG_IBM_EMAC_ZMII */
92 82
93#endif /* _IBM_EMAC_ZMII_H_ */ 83#endif /* _IBM_EMAC_ZMII_H_ */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a2c4dd4fb221..94239f67f3a3 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -96,7 +96,7 @@ static void ibmveth_proc_unregister_driver(void);
96static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter); 96static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
97static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); 97static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
98static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 98static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
99static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*); 99static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
100 100
101#ifdef CONFIG_PROC_FS 101#ifdef CONFIG_PROC_FS
102#define IBMVETH_PROC_DIR "net/ibmveth" 102#define IBMVETH_PROC_DIR "net/ibmveth"
@@ -181,6 +181,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
181 atomic_set(&pool->available, 0); 181 atomic_set(&pool->available, 0);
182 pool->producer_index = 0; 182 pool->producer_index = 0;
183 pool->consumer_index = 0; 183 pool->consumer_index = 0;
184 pool->active = 0;
184 185
185 return 0; 186 return 0;
186} 187}
@@ -236,7 +237,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
236 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 237 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
237 238
238 if(lpar_rc != H_Success) { 239 if(lpar_rc != H_Success) {
239 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; 240 pool->free_map[free_index] = index;
240 pool->skbuff[index] = NULL; 241 pool->skbuff[index] = NULL;
241 pool->consumer_index--; 242 pool->consumer_index--;
242 dma_unmap_single(&adapter->vdev->dev, 243 dma_unmap_single(&adapter->vdev->dev,
@@ -255,37 +256,19 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
255 atomic_add(buffers_added, &(pool->available)); 256 atomic_add(buffers_added, &(pool->available));
256} 257}
257 258
258/* check if replenishing is needed. */ 259/* replenish routine */
259static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
260{
261 return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
262 (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
263 (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
264}
265
266/* kick the replenish tasklet if we need replenishing and it isn't already running */
267static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
268{
269 if(ibmveth_is_replenishing_needed(adapter) &&
270 (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
271 schedule_work(&adapter->replenish_task);
272 }
273}
274
275/* replenish tasklet routine */
276static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) 260static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
277{ 261{
262 int i;
263
278 adapter->replenish_task_cycles++; 264 adapter->replenish_task_cycles++;
279 265
280 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]); 266 for(i = 0; i < IbmVethNumBufferPools; i++)
281 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]); 267 if(adapter->rx_buff_pool[i].active)
282 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]); 268 ibmveth_replenish_buffer_pool(adapter,
269 &adapter->rx_buff_pool[i]);
283 270
284 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); 271 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
285
286 atomic_inc(&adapter->not_replenishing);
287
288 ibmveth_schedule_replenishing(adapter);
289} 272}
290 273
291/* empty and free a buffer pool - also used to do cleanup in error paths */ 274
@@ -293,10 +276,8 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
293{ 276{
294 int i; 277 int i;
295 278
296 if(pool->free_map) { 279 kfree(pool->free_map);
297 kfree(pool->free_map); 280 pool->free_map = NULL;
298 pool->free_map = NULL;
299 }
300 281
301 if(pool->skbuff && pool->dma_addr) { 282 if(pool->skbuff && pool->dma_addr) {
302 for(i = 0; i < pool->size; ++i) { 283 for(i = 0; i < pool->size; ++i) {
@@ -321,6 +302,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
321 kfree(pool->skbuff); 302 kfree(pool->skbuff);
322 pool->skbuff = NULL; 303 pool->skbuff = NULL;
323 } 304 }
305 pool->active = 0;
324} 306}
325 307
326/* remove a buffer from a pool */ 308/* remove a buffer from a pool */
@@ -379,6 +361,12 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
379 ibmveth_assert(pool < IbmVethNumBufferPools); 361 ibmveth_assert(pool < IbmVethNumBufferPools);
380 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 362 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
381 363
364 if(!adapter->rx_buff_pool[pool].active) {
365 ibmveth_rxq_harvest_buffer(adapter);
366 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
367 return;
368 }
369
382 desc.desc = 0; 370 desc.desc = 0;
383 desc.fields.valid = 1; 371 desc.fields.valid = 1;
384 desc.fields.length = adapter->rx_buff_pool[pool].buff_size; 372 desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
@@ -409,6 +397,8 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
409 397
410static void ibmveth_cleanup(struct ibmveth_adapter *adapter) 398static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
411{ 399{
400 int i;
401
412 if(adapter->buffer_list_addr != NULL) { 402 if(adapter->buffer_list_addr != NULL) {
413 if(!dma_mapping_error(adapter->buffer_list_dma)) { 403 if(!dma_mapping_error(adapter->buffer_list_dma)) {
414 dma_unmap_single(&adapter->vdev->dev, 404 dma_unmap_single(&adapter->vdev->dev,
@@ -443,26 +433,24 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
443 adapter->rx_queue.queue_addr = NULL; 433 adapter->rx_queue.queue_addr = NULL;
444 } 434 }
445 435
446 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]); 436 for(i = 0; i<IbmVethNumBufferPools; i++)
447 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]); 437 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
448 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
449} 438}
450 439
451static int ibmveth_open(struct net_device *netdev) 440static int ibmveth_open(struct net_device *netdev)
452{ 441{
453 struct ibmveth_adapter *adapter = netdev->priv; 442 struct ibmveth_adapter *adapter = netdev->priv;
454 u64 mac_address = 0; 443 u64 mac_address = 0;
455 int rxq_entries; 444 int rxq_entries = 1;
456 unsigned long lpar_rc; 445 unsigned long lpar_rc;
457 int rc; 446 int rc;
458 union ibmveth_buf_desc rxq_desc; 447 union ibmveth_buf_desc rxq_desc;
448 int i;
459 449
460 ibmveth_debug_printk("open starting\n"); 450 ibmveth_debug_printk("open starting\n");
461 451
462 rxq_entries = 452 for(i = 0; i<IbmVethNumBufferPools; i++)
463 adapter->rx_buff_pool[0].size + 453 rxq_entries += adapter->rx_buff_pool[i].size;
464 adapter->rx_buff_pool[1].size +
465 adapter->rx_buff_pool[2].size + 1;
466 454
467 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 455 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
468 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 456 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
@@ -502,14 +490,8 @@ static int ibmveth_open(struct net_device *netdev)
502 adapter->rx_queue.num_slots = rxq_entries; 490 adapter->rx_queue.num_slots = rxq_entries;
503 adapter->rx_queue.toggle = 1; 491 adapter->rx_queue.toggle = 1;
504 492
505 if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) || 493 /* call change_mtu to init the buffer pools based in initial mtu */
506 ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) || 494 ibmveth_change_mtu(netdev, netdev->mtu);
507 ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
508 {
509 ibmveth_error_printk("unable to allocate buffer pools\n");
510 ibmveth_cleanup(adapter);
511 return -ENOMEM;
512 }
513 495
514 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); 496 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
515 mac_address = mac_address >> 16; 497 mac_address = mac_address >> 16;
@@ -532,7 +514,7 @@ static int ibmveth_open(struct net_device *netdev)
532 514
533 if(lpar_rc != H_Success) { 515 if(lpar_rc != H_Success) {
534 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc); 516 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
535 ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n", 517 ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
536 adapter->buffer_list_dma, 518 adapter->buffer_list_dma,
537 adapter->filter_list_dma, 519 adapter->filter_list_dma,
538 rxq_desc.desc, 520 rxq_desc.desc,
@@ -552,10 +534,10 @@ static int ibmveth_open(struct net_device *netdev)
552 return rc; 534 return rc;
553 } 535 }
554 536
555 netif_start_queue(netdev); 537 ibmveth_debug_printk("initial replenish cycle\n");
538 ibmveth_interrupt(netdev->irq, netdev, NULL);
556 539
557 ibmveth_debug_printk("scheduling initial replenish cycle\n"); 540 netif_start_queue(netdev);
558 ibmveth_schedule_replenishing(adapter);
559 541
560 ibmveth_debug_printk("open complete\n"); 542 ibmveth_debug_printk("open complete\n");
561 543
@@ -573,9 +555,6 @@ static int ibmveth_close(struct net_device *netdev)
573 555
574 free_irq(netdev->irq, netdev); 556 free_irq(netdev->irq, netdev);
575 557
576 cancel_delayed_work(&adapter->replenish_task);
577 flush_scheduled_work();
578
579 do { 558 do {
580 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 559 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
581 } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy)); 560 } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@ -640,12 +619,18 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
640 unsigned long lpar_rc; 619 unsigned long lpar_rc;
641 int nfrags = 0, curfrag; 620 int nfrags = 0, curfrag;
642 unsigned long correlator; 621 unsigned long correlator;
622 unsigned long flags;
643 unsigned int retry_count; 623 unsigned int retry_count;
624 unsigned int tx_dropped = 0;
625 unsigned int tx_bytes = 0;
626 unsigned int tx_packets = 0;
627 unsigned int tx_send_failed = 0;
628 unsigned int tx_map_failed = 0;
629
644 630
645 if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) { 631 if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
646 adapter->stats.tx_dropped++; 632 tx_dropped++;
647 dev_kfree_skb(skb); 633 goto out;
648 return 0;
649 } 634 }
650 635
651 memset(&desc, 0, sizeof(desc)); 636 memset(&desc, 0, sizeof(desc));
@@ -664,10 +649,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
664 649
665 if(dma_mapping_error(desc[0].fields.address)) { 650 if(dma_mapping_error(desc[0].fields.address)) {
666 ibmveth_error_printk("tx: unable to map initial fragment\n"); 651 ibmveth_error_printk("tx: unable to map initial fragment\n");
667 adapter->tx_map_failed++; 652 tx_map_failed++;
668 adapter->stats.tx_dropped++; 653 tx_dropped++;
669 dev_kfree_skb(skb); 654 goto out;
670 return 0;
671 } 655 }
672 656
673 curfrag = nfrags; 657 curfrag = nfrags;
@@ -684,8 +668,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
684 668
685 if(dma_mapping_error(desc[curfrag+1].fields.address)) { 669 if(dma_mapping_error(desc[curfrag+1].fields.address)) {
686 ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag); 670 ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
687 adapter->tx_map_failed++; 671 tx_map_failed++;
688 adapter->stats.tx_dropped++; 672 tx_dropped++;
689 /* Free all the mappings we just created */ 673 /* Free all the mappings we just created */
690 while(curfrag < nfrags) { 674 while(curfrag < nfrags) {
691 dma_unmap_single(&adapter->vdev->dev, 675 dma_unmap_single(&adapter->vdev->dev,
@@ -694,8 +678,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
694 DMA_TO_DEVICE); 678 DMA_TO_DEVICE);
695 curfrag++; 679 curfrag++;
696 } 680 }
697 dev_kfree_skb(skb); 681 goto out;
698 return 0;
699 } 682 }
700 } 683 }
701 684
@@ -720,11 +703,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
720 ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i, 703 ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
721 desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address); 704 desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
722 } 705 }
723 adapter->tx_send_failed++; 706 tx_send_failed++;
724 adapter->stats.tx_dropped++; 707 tx_dropped++;
725 } else { 708 } else {
726 adapter->stats.tx_packets++; 709 tx_packets++;
727 adapter->stats.tx_bytes += skb->len; 710 tx_bytes += skb->len;
711 netdev->trans_start = jiffies;
728 } 712 }
729 713
730 do { 714 do {
@@ -733,6 +717,14 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
733 desc[nfrags].fields.length, DMA_TO_DEVICE); 717 desc[nfrags].fields.length, DMA_TO_DEVICE);
734 } while(--nfrags >= 0); 718 } while(--nfrags >= 0);
735 719
720out: spin_lock_irqsave(&adapter->stats_lock, flags);
721 adapter->stats.tx_dropped += tx_dropped;
722 adapter->stats.tx_bytes += tx_bytes;
723 adapter->stats.tx_packets += tx_packets;
724 adapter->tx_send_failed += tx_send_failed;
725 adapter->tx_map_failed += tx_map_failed;
726 spin_unlock_irqrestore(&adapter->stats_lock, flags);
727
736 dev_kfree_skb(skb); 728 dev_kfree_skb(skb);
737 return 0; 729 return 0;
738} 730}
@@ -776,13 +768,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
776 adapter->stats.rx_packets++; 768 adapter->stats.rx_packets++;
777 adapter->stats.rx_bytes += length; 769 adapter->stats.rx_bytes += length;
778 frames_processed++; 770 frames_processed++;
771 netdev->last_rx = jiffies;
779 } 772 }
780 } else { 773 } else {
781 more_work = 0; 774 more_work = 0;
782 } 775 }
783 } while(more_work && (frames_processed < max_frames_to_process)); 776 } while(more_work && (frames_processed < max_frames_to_process));
784 777
785 ibmveth_schedule_replenishing(adapter); 778 ibmveth_replenish_task(adapter);
786 779
787 if(more_work) { 780 if(more_work) {
788 /* more work to do - return that we are not done yet */ 781 /* more work to do - return that we are not done yet */
@@ -883,17 +876,54 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
883 876
884static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) 877static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
885{ 878{
886 if ((new_mtu < 68) || (new_mtu > (1<<20))) 879 struct ibmveth_adapter *adapter = dev->priv;
880 int i;
881 int prev_smaller = 1;
882
883 if ((new_mtu < 68) ||
884 (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
887 return -EINVAL; 885 return -EINVAL;
886
887 for(i = 0; i<IbmVethNumBufferPools; i++) {
888 int activate = 0;
889 if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
890 activate = 1;
891 prev_smaller = 1;
892 } else {
893 if (prev_smaller)
894 activate = 1;
895 prev_smaller = 0;
896 }
897
898 if (activate && !adapter->rx_buff_pool[i].active) {
899 struct ibmveth_buff_pool *pool =
900 &adapter->rx_buff_pool[i];
901 if(ibmveth_alloc_buffer_pool(pool)) {
902 ibmveth_error_printk("unable to alloc pool\n");
903 return -ENOMEM;
904 }
905 adapter->rx_buff_pool[i].active = 1;
906 } else if (!activate && adapter->rx_buff_pool[i].active) {
907 adapter->rx_buff_pool[i].active = 0;
908 h_free_logical_lan_buffer(adapter->vdev->unit_address,
909 (u64)pool_size[i]);
910 }
911
912 }
913
914 /* kick the interrupt handler so that the new buffer pools get
915 replenished or deallocated */
916 ibmveth_interrupt(dev->irq, dev, NULL);
917
888 dev->mtu = new_mtu; 918 dev->mtu = new_mtu;
889 return 0; 919 return 0;
890} 920}
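
[Editor's note: with the pool geometry this patch adds to ibmveth.h (sizes 512, 2K, 16K, 32K, 64K; IBMVETH_BUFF_OH = 22), the loop above activates every pool smaller than the MTU plus the first one large enough. A standalone userspace sketch of the same rule, worked for MTU 9000:]

	#include <stdio.h>

	int main(void)
	{
		static const int pool_size[] = { 512, 2048, 16384, 32768, 65536 };
		const int oh = 22, mtu = 9000;	/* IBMVETH_BUFF_OH, example MTU */
		int i, prev_smaller = 1;

		for (i = 0; i < 5; i++) {
			/* activate pools too small for the MTU, plus the first
			 * pool that fits (condensed form of the driver logic) */
			int activate = (mtu > pool_size[i] - oh) || prev_smaller;

			prev_smaller = (mtu > pool_size[i] - oh);
			printf("pool %d (%d bytes): %s\n", i, pool_size[i],
			       activate ? "active" : "inactive");
		}
		return 0;	/* pools 0..2 end up active for MTU 9000 */
	}
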
891 921
892static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 922static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
893{ 923{
894 int rc; 924 int rc, i;
895 struct net_device *netdev; 925 struct net_device *netdev;
896 struct ibmveth_adapter *adapter; 926 struct ibmveth_adapter *adapter = NULL;
897 927
898 unsigned char *mac_addr_p; 928 unsigned char *mac_addr_p;
899 unsigned int *mcastFilterSize_p; 929 unsigned int *mcastFilterSize_p;
@@ -960,23 +990,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
960 netdev->ethtool_ops = &netdev_ethtool_ops; 990 netdev->ethtool_ops = &netdev_ethtool_ops;
961 netdev->change_mtu = ibmveth_change_mtu; 991 netdev->change_mtu = ibmveth_change_mtu;
962 SET_NETDEV_DEV(netdev, &dev->dev); 992 SET_NETDEV_DEV(netdev, &dev->dev);
993 netdev->features |= NETIF_F_LLTX;
994 spin_lock_init(&adapter->stats_lock);
963 995
964 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 996 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
965 997
966 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize); 998 for(i = 0; i<IbmVethNumBufferPools; i++)
967 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize); 999 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
968 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize); 1000 pool_count[i], pool_size[i]);
969 1001
970 ibmveth_debug_printk("adapter @ 0x%p\n", adapter); 1002 ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
971 1003
972 INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
973
974 adapter->buffer_list_dma = DMA_ERROR_CODE; 1004 adapter->buffer_list_dma = DMA_ERROR_CODE;
975 adapter->filter_list_dma = DMA_ERROR_CODE; 1005 adapter->filter_list_dma = DMA_ERROR_CODE;
976 adapter->rx_queue.queue_dma = DMA_ERROR_CODE; 1006 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
977 1007
978 atomic_set(&adapter->not_replenishing, 1);
979
980 ibmveth_debug_printk("registering netdev...\n"); 1008 ibmveth_debug_printk("registering netdev...\n");
981 1009
982 rc = register_netdev(netdev); 1010 rc = register_netdev(netdev);
@@ -1146,14 +1174,16 @@ static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1146 { "network", "IBM,l-lan"}, 1174 { "network", "IBM,l-lan"},
1147 { "", "" } 1175 { "", "" }
1148}; 1176};
1149
1150MODULE_DEVICE_TABLE(vio, ibmveth_device_table); 1177MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1151 1178
1152static struct vio_driver ibmveth_driver = { 1179static struct vio_driver ibmveth_driver = {
1153 .name = (char *)ibmveth_driver_name, 1180 .id_table = ibmveth_device_table,
1154 .id_table = ibmveth_device_table, 1181 .probe = ibmveth_probe,
1155 .probe = ibmveth_probe, 1182 .remove = ibmveth_remove,
1156 .remove = ibmveth_remove 1183 .driver = {
1184 .name = ibmveth_driver_name,
1185 .owner = THIS_MODULE,
1186 }
1157}; 1187};
1158 1188
1159static int __init ibmveth_module_init(void) 1189static int __init ibmveth_module_init(void)
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 51a470da9686..46919a814fca 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -49,6 +49,7 @@
49#define H_SEND_LOGICAL_LAN 0x120 49#define H_SEND_LOGICAL_LAN 0x120
50#define H_MULTICAST_CTRL 0x130 50#define H_MULTICAST_CTRL 0x130
51#define H_CHANGE_LOGICAL_LAN_MAC 0x14C 51#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
52#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
52 53
53/* hcall macros */ 54/* hcall macros */
54#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ 55#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
@@ -69,13 +70,15 @@
69#define h_change_logical_lan_mac(ua, mac) \ 70#define h_change_logical_lan_mac(ua, mac) \
70 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) 71 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
71 72
72#define IbmVethNumBufferPools 3 73#define h_free_logical_lan_buffer(ua, bufsize) \
73#define IbmVethPool0DftSize (1024 * 2) 74 plpar_hcall_norets(H_FREE_LOGICAL_LAN_BUFFER, ua, bufsize)
74#define IbmVethPool1DftSize (1024 * 4) 75
75#define IbmVethPool2DftSize (1024 * 10) 76#define IbmVethNumBufferPools 5
76#define IbmVethPool0DftCnt 256 77#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
77#define IbmVethPool1DftCnt 256 78
78#define IbmVethPool2DftCnt 256 79/* pool_size should be sorted */
80static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
81static int pool_count[] = { 256, 768, 256, 256, 256 };
79 82
80#define IBM_VETH_INVALID_MAP ((u16)0xffff) 83#define IBM_VETH_INVALID_MAP ((u16)0xffff)
81 84
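
[Editor's note: these counts also determine receive-queue sizing: ibmveth_open(), earlier in this patch, sums every pool's count plus one slot, giving 256 + 768 + 256 + 256 + 256 + 1 = 1793 entries. An illustrative fragment mirroring that arithmetic:]

	/* Sizing sketch mirroring ibmveth_open() (illustrative fragment): */
	static int ex_rxq_entries(void)
	{
		int i, n = 1;			/* one extra slot */

		for (i = 0; i < IbmVethNumBufferPools; i++)
			n += pool_count[i];	/* = 1793 with the counts above */
		return n;
	}
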
@@ -90,6 +93,7 @@ struct ibmveth_buff_pool {
90 u16 *free_map; 93 u16 *free_map;
91 dma_addr_t *dma_addr; 94 dma_addr_t *dma_addr;
92 struct sk_buff **skbuff; 95 struct sk_buff **skbuff;
96 int active;
93}; 97};
94 98
95struct ibmveth_rx_q { 99struct ibmveth_rx_q {
@@ -114,10 +118,6 @@ struct ibmveth_adapter {
114 dma_addr_t filter_list_dma; 118 dma_addr_t filter_list_dma;
115 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; 119 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
116 struct ibmveth_rx_q rx_queue; 120 struct ibmveth_rx_q rx_queue;
117 atomic_t not_replenishing;
118
119 /* helper tasks */
120 struct work_struct replenish_task;
121 121
122 /* adapter specific stats */ 122 /* adapter specific stats */
123 u64 replenish_task_cycles; 123 u64 replenish_task_cycles;
@@ -131,6 +131,7 @@ struct ibmveth_adapter {
131 u64 tx_linearize_failed; 131 u64 tx_linearize_failed;
132 u64 tx_map_failed; 132 u64 tx_map_failed;
133 u64 tx_send_failed; 133 u64 tx_send_failed;
134 spinlock_t stats_lock;
134}; 135};
135 136
136struct ibmveth_buf_desc_fields { 137struct ibmveth_buf_desc_fields {
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index ca5914091d3a..d54156f11e61 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -400,5 +400,15 @@ config VIA_FIR
400 To compile it as a module, choose M here: the module will be called 400 To compile it as a module, choose M here: the module will be called
401 via-ircc. 401 via-ircc.
402 402
403config PXA_FICP
404 tristate "Intel PXA2xx Internal FICP"
405 depends on ARCH_PXA && IRDA
406 help
407 Say Y or M here if you want to build support for the PXA2xx
408 built-in IRDA interface which can support both SIR and FIR.
409 This driver relies on platform specific helper routines so
410 available capabilities may vary from one PXA2xx target to
411 another.
412
403endmenu 413endmenu
404 414
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 29a8bd812b21..e7a8b7f7f5dd 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_SMC_IRCC_FIR) += smsc-ircc2.o
18obj-$(CONFIG_ALI_FIR) += ali-ircc.o 18obj-$(CONFIG_ALI_FIR) += ali-ircc.o
19obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o 19obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
20obj-$(CONFIG_VIA_FIR) += via-ircc.o 20obj-$(CONFIG_VIA_FIR) += via-ircc.o
21obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
21# Old dongle drivers for old SIR drivers 22# Old dongle drivers for old SIR drivers
22obj-$(CONFIG_ESI_DONGLE_OLD) += esi.o 23obj-$(CONFIG_ESI_DONGLE_OLD) += esi.o
23obj-$(CONFIG_TEKRAM_DONGLE_OLD) += tekram.o 24obj-$(CONFIG_TEKRAM_DONGLE_OLD) += tekram.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 0a08c539c051..0282771b1cbb 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1695,11 +1695,9 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1695 1695
1696freebufs: 1696freebufs:
1697 for (i = 0; i < TX_SLOTS; ++i) 1697 for (i = 0; i < TX_SLOTS; ++i)
1698 if (self->tx_bufs[i]) 1698 kfree (self->tx_bufs[i]);
1699 kfree (self->tx_bufs[i]);
1700 for (i = 0; i < RX_SLOTS; ++i) 1699 for (i = 0; i < RX_SLOTS; ++i)
1701 if (self->rx_bufs[i]) 1700 kfree (self->rx_bufs[i]);
1702 kfree (self->rx_bufs[i]);
1703 kfree(self->ringbuf); 1701 kfree(self->ringbuf);
1704 1702
1705freeregion: 1703freeregion:
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 6c766fdc51a6..c22c0517883c 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1168,10 +1168,8 @@ static inline void irda_usb_close(struct irda_usb_cb *self)
1168 unregister_netdev(self->netdev); 1168 unregister_netdev(self->netdev);
1169 1169
1170 /* Remove the speed buffer */ 1170 /* Remove the speed buffer */
1171 if (self->speed_buff != NULL) { 1171 kfree(self->speed_buff);
1172 kfree(self->speed_buff); 1172 self->speed_buff = NULL;
1173 self->speed_buff = NULL;
1174 }
1175} 1173}
1176 1174
1177/********************** USB CONFIG SUBROUTINES **********************/ 1175/********************** USB CONFIG SUBROUTINES **********************/
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 5971315f3fa0..3d016a498e1d 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -235,8 +235,7 @@ static int irport_close(struct irport_cb *self)
235 __FUNCTION__, self->io.sir_base); 235 __FUNCTION__, self->io.sir_base);
236 release_region(self->io.sir_base, self->io.sir_ext); 236 release_region(self->io.sir_base, self->io.sir_ext);
237 237
238 if (self->tx_buff.head) 238 kfree(self->tx_buff.head);
239 kfree(self->tx_buff.head);
240 239
241 if (self->rx_buff.skb) 240 if (self->rx_buff.skb)
242 kfree_skb(self->rx_buff.skb); 241 kfree_skb(self->rx_buff.skb);
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
new file mode 100644
index 000000000000..e1aa9910503b
--- /dev/null
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -0,0 +1,866 @@
1/*
2 * linux/drivers/net/irda/pxaficp_ir.c
3 *
4 * Based on sa1100_ir.c by Russell King
5 *
6 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
13 *
14 */
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/netdevice.h>
21#include <linux/slab.h>
22#include <linux/rtnetlink.h>
23#include <linux/interrupt.h>
24#include <linux/dma-mapping.h>
25#include <linux/platform_device.h>
26#include <linux/pm.h>
27
28#include <net/irda/irda.h>
29#include <net/irda/irmod.h>
30#include <net/irda/wrapper.h>
31#include <net/irda/irda_device.h>
32
33#include <asm/irq.h>
34#include <asm/dma.h>
35#include <asm/delay.h>
36#include <asm/hardware.h>
37#include <asm/arch/irda.h>
38#include <asm/arch/pxa-regs.h>
39
40#ifdef CONFIG_MACH_MAINSTONE
41#include <asm/arch/mainstone.h>
42#endif
43
44#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
45#define IrSR_RXPL_POS_IS_ZERO 0x0
46#define IrSR_TXPL_NEG_IS_ZERO (1<<3)
47#define IrSR_TXPL_POS_IS_ZERO 0x0
48#define IrSR_XMODE_PULSE_1_6 (1<<2)
49#define IrSR_XMODE_PULSE_3_16 0x0
50#define IrSR_RCVEIR_IR_MODE (1<<1)
51#define IrSR_RCVEIR_UART_MODE 0x0
52#define IrSR_XMITIR_IR_MODE (1<<0)
53#define IrSR_XMITIR_UART_MODE 0x0
54
55#define IrSR_IR_RECEIVE_ON (\
56 IrSR_RXPL_NEG_IS_ZERO | \
57 IrSR_TXPL_POS_IS_ZERO | \
58 IrSR_XMODE_PULSE_3_16 | \
59 IrSR_RCVEIR_IR_MODE | \
60 IrSR_XMITIR_UART_MODE)
61
62#define IrSR_IR_TRANSMIT_ON (\
63 IrSR_RXPL_NEG_IS_ZERO | \
64 IrSR_TXPL_POS_IS_ZERO | \
65 IrSR_XMODE_PULSE_3_16 | \
66 IrSR_RCVEIR_UART_MODE | \
67 IrSR_XMITIR_IR_MODE)
68
69struct pxa_irda {
70 int speed;
71 int newspeed;
72 unsigned long last_oscr;
73
74 unsigned char *dma_rx_buff;
75 unsigned char *dma_tx_buff;
76 dma_addr_t dma_rx_buff_phy;
77 dma_addr_t dma_tx_buff_phy;
78 unsigned int dma_tx_buff_len;
79 int txdma;
80 int rxdma;
81
82 struct net_device_stats stats;
83 struct irlap_cb *irlap;
84 struct qos_info qos;
85
86 iobuff_t tx_buff;
87 iobuff_t rx_buff;
88
89 struct device *dev;
90 struct pxaficp_platform_data *pdata;
91};
92
93
94#define IS_FIR(si) ((si)->speed >= 4000000)
95#define IRDA_FRAME_SIZE_LIMIT 2047
96
97static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
98{
99 DCSR(si->rxdma) = DCSR_NODESC;
100 DSADR(si->rxdma) = __PREG(ICDR);
101 DTADR(si->rxdma) = si->dma_rx_buff_phy;
102 DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
103 DCSR(si->rxdma) |= DCSR_RUN;
104}
105
106static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
107{
108 DCSR(si->txdma) = DCSR_NODESC;
109 DSADR(si->txdma) = si->dma_tx_buff_phy;
110 DTADR(si->txdma) = __PREG(ICDR);
111 DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
112 DCSR(si->txdma) |= DCSR_RUN;
113}
114
115/*
116 * Set the IrDA communications speed.
117 */
118static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
119{
120 unsigned long flags;
121 unsigned int divisor;
122
123 switch (speed) {
124 case 9600: case 19200: case 38400:
125 case 57600: case 115200:
126
127 /* refer to PXA250/210 Developer's Manual 10-7 */
128 /* BaudRate = 14.7456 MHz / (16*Divisor) */
129 divisor = 14745600 / (16 * speed);
130
131 local_irq_save(flags);
132
133 if (IS_FIR(si)) {
134 /* stop RX DMA */
135 DCSR(si->rxdma) &= ~DCSR_RUN;
136 /* disable FICP */
137 ICCR0 = 0;
138 pxa_set_cken(CKEN13_FICP, 0);
139
140 /* set board transceiver to SIR mode */
141 si->pdata->transceiver_mode(si->dev, IR_SIRMODE);
142
143 /* configure GPIO46/47 */
144 pxa_gpio_mode(GPIO46_STRXD_MD);
145 pxa_gpio_mode(GPIO47_STTXD_MD);
146
147 /* enable the STUART clock */
148 pxa_set_cken(CKEN5_STUART, 1);
149 }
150
151 /* disable STUART first */
152 STIER = 0;
153
154 /* access DLL & DLH */
155 STLCR |= LCR_DLAB;
156 STDLL = divisor & 0xff;
157 STDLH = divisor >> 8;
158 STLCR &= ~LCR_DLAB;
159
160 si->speed = speed;
161 STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
162 STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
163
164 local_irq_restore(flags);
165 break;
166
167 case 4000000:
168 local_irq_save(flags);
169
170 /* disable STUART */
171 STIER = 0;
172 STISR = 0;
173 pxa_set_cken(CKEN5_STUART, 0);
174
175 /* disable FICP first */
176 ICCR0 = 0;
177
178 /* set board transceiver to FIR mode */
179 si->pdata->transceiver_mode(si->dev, IR_FIRMODE);
180
181 /* configure GPIO46/47 */
182 pxa_gpio_mode(GPIO46_ICPRXD_MD);
183 pxa_gpio_mode(GPIO47_ICPTXD_MD);
184
185 /* enable the FICP clock */
186 pxa_set_cken(CKEN13_FICP, 1);
187
188 si->speed = speed;
189 pxa_irda_fir_dma_rx_start(si);
190 ICCR0 = ICCR0_ITR | ICCR0_RXE;
191
192 local_irq_restore(flags);
193 break;
194
195 default:
196 return -EINVAL;
197 }
198
199 return 0;
200}
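
[Editor's note: as the comment in the SIR branch says, BaudRate = 14.7456 MHz / (16 * Divisor), so the divisor is exact for all five supported rates, e.g. 14745600 / (16 * 115200) = 8. A quick worked table in illustrative userspace code:]

	#include <stdio.h>

	int main(void)
	{
		static const int rates[] = { 9600, 19200, 38400, 57600, 115200 };
		int i;

		/* divisors: 96, 48, 24, 16, 8 respectively */
		for (i = 0; i < 5; i++)
			printf("%6d baud -> divisor %d\n", rates[i],
			       14745600 / (16 * rates[i]));
		return 0;
	}
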
201
202/* SIR interrupt service routine. */
203static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id, struct pt_regs *regs)
204{
205 struct net_device *dev = dev_id;
206 struct pxa_irda *si = netdev_priv(dev);
207 int iir, lsr, data;
208
209 iir = STIIR;
210
211 switch (iir & 0x0F) {
212 case 0x06: /* Receiver Line Status */
213 lsr = STLSR;
214 while (lsr & LSR_FIFOE) {
215 data = STRBR;
216 if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
217 printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
218 si->stats.rx_errors++;
219 if (lsr & LSR_FE)
220 si->stats.rx_frame_errors++;
221 if (lsr & LSR_OE)
222 si->stats.rx_fifo_errors++;
223 } else {
224 si->stats.rx_bytes++;
225 async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
226 }
227 lsr = STLSR;
228 }
229 dev->last_rx = jiffies;
230 si->last_oscr = OSCR;
231 break;
232
233 case 0x04: /* Received Data Available */
234 /* fall through */
235
236 case 0x0C: /* Character Timeout Indication */
237 do {
238 si->stats.rx_bytes++;
239 async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
240 } while (STLSR & LSR_DR);
241 dev->last_rx = jiffies;
242 si->last_oscr = OSCR;
243 break;
244
245 case 0x02: /* Transmit FIFO Data Request */
246 while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
247 STTHR = *si->tx_buff.data++;
248 si->tx_buff.len -= 1;
249 }
250
251 if (si->tx_buff.len == 0) {
252 si->stats.tx_packets++;
253 si->stats.tx_bytes += si->tx_buff.data -
254 si->tx_buff.head;
255
256 /* We need to ensure that the transmitter has finished. */
257 while ((STLSR & LSR_TEMT) == 0)
258 cpu_relax();
259 si->last_oscr = OSCR;
260
261 /*
262 * Ok, we've finished transmitting. Now enable
263 * the receiver. Sometimes we get a receive IRQ
264 * immediately after a transmit...
265 */
266 if (si->newspeed) {
267 pxa_irda_set_speed(si, si->newspeed);
268 si->newspeed = 0;
269 } else {
270 /* enable IR Receiver, disable IR Transmitter */
271 STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
272 /* enable STUART and receive interrupts */
273 STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
274 }
275 /* I'm hungry! */
276 netif_wake_queue(dev);
277 }
278 break;
279 }
280
281 return IRQ_HANDLED;
282}
283
284/* FIR Receive DMA interrupt handler */
285static void pxa_irda_fir_dma_rx_irq(int channel, void *data, struct pt_regs *regs)
286{
287 int dcsr = DCSR(channel);
288
289 DCSR(channel) = dcsr & ~DCSR_RUN;
290
291 printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
292}
293
294/* FIR Transmit DMA interrupt handler */
295static void pxa_irda_fir_dma_tx_irq(int channel, void *data, struct pt_regs *regs)
296{
297 struct net_device *dev = data;
298 struct pxa_irda *si = netdev_priv(dev);
299 int dcsr;
300
301 dcsr = DCSR(channel);
302 DCSR(channel) = dcsr & ~DCSR_RUN;
303
304 if (dcsr & DCSR_ENDINTR) {
305 si->stats.tx_packets++;
306 si->stats.tx_bytes += si->dma_tx_buff_len;
307 } else {
308 si->stats.tx_errors++;
309 }
310
311 while (ICSR1 & ICSR1_TBY)
312 cpu_relax();
313 si->last_oscr = OSCR;
314
315 /*
316 * HACK: It looks like the TBY bit is dropped too soon.
317 * Without this delay things break.
318 */
319 udelay(120);
320
321 if (si->newspeed) {
322 pxa_irda_set_speed(si, si->newspeed);
323 si->newspeed = 0;
324 } else {
325 ICCR0 = 0;
326 pxa_irda_fir_dma_rx_start(si);
327 ICCR0 = ICCR0_ITR | ICCR0_RXE;
328 }
329 netif_wake_queue(dev);
330}
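
The udelay(120) workaround above is worth a second look: the handler must not
re-enable reception or change speed until the transmitter is genuinely idle,
and ICSR1_TBY alone is not a reliable indication. A minimal sketch of that
wait factored into a helper (the 120us figure is the empirical value from
this handler, not a documented constant):

	/* Spin until the FICP transmitter is idle, then pad by the
	 * empirical guard interval used in pxa_irda_fir_dma_tx_irq(). */
	static inline void pxa_irda_wait_tx_idle(void)
	{
		while (ICSR1 & ICSR1_TBY)
			cpu_relax();
		udelay(120);	/* TBY is observed to deassert too soon */
	}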
331
332/* EIF (Error in FIFO / End in Frame) handler for FIR */
333static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev)
334{
335 unsigned int len, stat, data;
336
337 /* Get the current data position. */
338 len = DTADR(si->rxdma) - si->dma_rx_buff_phy;
339
340 do {
341 /* Read Status, and then Data. */
342 stat = ICSR1;
343 rmb();
344 data = ICDR;
345
346 if (stat & (ICSR1_CRE | ICSR1_ROR)) {
347 si->stats.rx_errors++;
348 if (stat & ICSR1_CRE) {
349 printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
350 si->stats.rx_crc_errors++;
351 }
352 if (stat & ICSR1_ROR) {
353 printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
354 si->stats.rx_frame_errors++;
355 }
356 } else {
357 si->dma_rx_buff[len++] = data;
358 }
359 /* If we hit the end of frame, there's no point in continuing. */
360 if (stat & ICSR1_EOF)
361 break;
362 } while (ICSR0 & ICSR0_EIF);
363
364 if (stat & ICSR1_EOF) {
365 /* end of frame. */
366 		struct sk_buff *skb = alloc_skb(len + 1, GFP_ATOMIC);
367 if (!skb) {
368 printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
369 si->stats.rx_dropped++;
370 return;
371 }
372
373 /* Align IP header to 20 bytes */
374 skb_reserve(skb, 1);
375 memcpy(skb->data, si->dma_rx_buff, len);
376 skb_put(skb, len);
377
378 /* Feed it to IrLAP */
379 skb->dev = dev;
380 skb->mac.raw = skb->data;
381 skb->protocol = htons(ETH_P_IRDA);
382 netif_rx(skb);
383
384 si->stats.rx_packets++;
385 si->stats.rx_bytes += len;
386
387 dev->last_rx = jiffies;
388 }
389}
390
391/* FIR interrupt handler */
392static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id, struct pt_regs *regs)
393{
394 struct net_device *dev = dev_id;
395 struct pxa_irda *si = netdev_priv(dev);
396 int icsr0;
397
398 /* stop RX DMA */
399 DCSR(si->rxdma) &= ~DCSR_RUN;
400 si->last_oscr = OSCR;
401 icsr0 = ICSR0;
402
403 if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
404 if (icsr0 & ICSR0_FRE) {
405 printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
406 si->stats.rx_frame_errors++;
407 } else {
408 printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
409 si->stats.rx_errors++;
410 }
411 ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
412 }
413
414 if (icsr0 & ICSR0_EIF) {
415 		/* An error in the FIFO occurred, or there is an end of frame */
416 pxa_irda_fir_irq_eif(si, dev);
417 }
418
419 ICCR0 = 0;
420 pxa_irda_fir_dma_rx_start(si);
421 ICCR0 = ICCR0_ITR | ICCR0_RXE;
422
423 return IRQ_HANDLED;
424}
425
426/* hard_xmit interface of irda device */
427static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
428{
429 struct pxa_irda *si = netdev_priv(dev);
430 int speed = irda_get_next_speed(skb);
431
432 /*
433 * Does this packet contain a request to change the interface
434 * speed? If so, remember it until we complete the transmission
435 * of this frame.
436 */
437 if (speed != si->speed && speed != -1)
438 si->newspeed = speed;
439
440 /*
441 * If this is an empty frame, we can bypass a lot.
442 */
443 if (skb->len == 0) {
444 if (si->newspeed) {
445 si->newspeed = 0;
446 pxa_irda_set_speed(si, speed);
447 }
448 dev_kfree_skb(skb);
449 return 0;
450 }
451
452 netif_stop_queue(dev);
453
454 if (!IS_FIR(si)) {
455 si->tx_buff.data = si->tx_buff.head;
456 si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);
457
458 /* Disable STUART interrupts and switch to transmit mode. */
459 STIER = 0;
460 STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;
461
462 /* enable STUART and transmit interrupts */
463 STIER = IER_UUE | IER_TIE;
464 } else {
465 unsigned long mtt = irda_get_mtt(skb);
466
467 si->dma_tx_buff_len = skb->len;
468 memcpy(si->dma_tx_buff, skb->data, skb->len);
469
470 if (mtt)
471 while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
472 cpu_relax();
473
474 /* stop RX DMA, disable FICP */
475 DCSR(si->rxdma) &= ~DCSR_RUN;
476 ICCR0 = 0;
477
478 pxa_irda_fir_dma_tx_start(si);
479 ICCR0 = ICCR0_ITR | ICCR0_TXE;
480 }
481
482 dev_kfree_skb(skb);
483 dev->trans_start = jiffies;
484 return 0;
485}
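
A note on the minimum-turnaround busy-wait in the FIR branch above: the
PXA2xx OS timer (OSCR) ticks at 3.6864 MHz, so dividing the tick delta by 4
treats it as a 4 MHz counter. That underestimates the elapsed time, meaning
the loop spins for roughly 4/3.6864, about 1.085, times the requested mtt in
microseconds, which errs on the safe side. The same check as a hedged helper:

	/* true once at least mtt microseconds have elapsed since 'since';
	 * assumes OSCR ticks at 3.6864 MHz as on PXA2xx */
	static inline int pxa_irda_mtt_elapsed(unsigned int since, unsigned long mtt)
	{
		return (unsigned)(OSCR - since) / 4 >= mtt;
	}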
486
487static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
488{
489 struct if_irda_req *rq = (struct if_irda_req *)ifreq;
490 struct pxa_irda *si = netdev_priv(dev);
491 int ret;
492
493 switch (cmd) {
494 case SIOCSBANDWIDTH:
495 ret = -EPERM;
496 if (capable(CAP_NET_ADMIN)) {
497 /*
498 * We are unable to set the speed if the
499 * device is not running.
500 */
501 if (netif_running(dev)) {
502 ret = pxa_irda_set_speed(si,
503 rq->ifr_baudrate);
504 } else {
505 printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
506 ret = 0;
507 }
508 }
509 break;
510
511 case SIOCSMEDIABUSY:
512 ret = -EPERM;
513 if (capable(CAP_NET_ADMIN)) {
514 irda_device_set_media_busy(dev, TRUE);
515 ret = 0;
516 }
517 break;
518
519 case SIOCGRECEIVING:
520 ret = 0;
521 rq->ifr_receiving = IS_FIR(si) ? 0
522 : si->rx_buff.state != OUTSIDE_FRAME;
523 break;
524
525 default:
526 ret = -EOPNOTSUPP;
527 break;
528 }
529
530 return ret;
531}
532
533static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
534{
535 struct pxa_irda *si = netdev_priv(dev);
536 return &si->stats;
537}
538
539static void pxa_irda_startup(struct pxa_irda *si)
540{
541 /* Disable STUART interrupts */
542 STIER = 0;
543 /* enable STUART interrupt to the processor */
544 STMCR = MCR_OUT2;
545 /* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
546 STLCR = LCR_WLS0 | LCR_WLS1;
547 	/* enable the FIFOs to improve performance */
548 STFCR = FCR_TRFIFOE | FCR_ITL_32;
549
550 /* disable FICP */
551 ICCR0 = 0;
552 /* configure FICP ICCR2 */
553 ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;
554
555 /* configure DMAC */
556 DRCMR17 = si->rxdma | DRCMR_MAPVLD;
557 DRCMR18 = si->txdma | DRCMR_MAPVLD;
558
559 /* force SIR reinitialization */
560 si->speed = 4000000;
561 pxa_irda_set_speed(si, 9600);
562
563 printk(KERN_DEBUG "pxa_ir: irda startup\n");
564}
565
566static void pxa_irda_shutdown(struct pxa_irda *si)
567{
568 unsigned long flags;
569
570 local_irq_save(flags);
571
572 /* disable STUART and interrupt */
573 STIER = 0;
574 /* disable STUART SIR mode */
575 STISR = 0;
576 /* disable the STUART clock */
577 pxa_set_cken(CKEN5_STUART, 0);
578
579 /* disable DMA */
580 DCSR(si->txdma) &= ~DCSR_RUN;
581 DCSR(si->rxdma) &= ~DCSR_RUN;
582 /* disable FICP */
583 ICCR0 = 0;
584 /* disable the FICP clock */
585 pxa_set_cken(CKEN13_FICP, 0);
586
587 DRCMR17 = 0;
588 DRCMR18 = 0;
589
590 local_irq_restore(flags);
591
592 /* power off board transceiver */
593 si->pdata->transceiver_mode(si->dev, IR_OFF);
594
595 printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
596}
597
598static int pxa_irda_start(struct net_device *dev)
599{
600 struct pxa_irda *si = netdev_priv(dev);
601 int err;
602
603 si->speed = 9600;
604
605 err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
606 if (err)
607 goto err_irq1;
608
609 err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
610 if (err)
611 goto err_irq2;
612
613 /*
614 	 * The interrupts must remain disabled for now.
615 */
616 disable_irq(IRQ_STUART);
617 disable_irq(IRQ_ICP);
618
619 err = -EBUSY;
620 	si->rxdma = pxa_request_dma("FICP_RX", DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
621 if (si->rxdma < 0)
622 goto err_rx_dma;
623
624 	si->txdma = pxa_request_dma("FICP_TX", DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
625 if (si->txdma < 0)
626 goto err_tx_dma;
627
628 err = -ENOMEM;
629 si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
630 					     &si->dma_rx_buff_phy, GFP_KERNEL);
631 if (!si->dma_rx_buff)
632 goto err_dma_rx_buff;
633
634 si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
635 					     &si->dma_tx_buff_phy, GFP_KERNEL);
636 if (!si->dma_tx_buff)
637 goto err_dma_tx_buff;
638
639 /* Setup the serial port for the initial speed. */
640 pxa_irda_startup(si);
641
642 /*
643 * Open a new IrLAP layer instance.
644 */
645 si->irlap = irlap_open(dev, &si->qos, "pxa");
646 err = -ENOMEM;
647 if (!si->irlap)
648 goto err_irlap;
649
650 /*
651 * Now enable the interrupt and start the queue
652 */
653 enable_irq(IRQ_STUART);
654 enable_irq(IRQ_ICP);
655 netif_start_queue(dev);
656
657 printk(KERN_DEBUG "pxa_ir: irda driver opened\n");
658
659 return 0;
660
661err_irlap:
662 pxa_irda_shutdown(si);
663 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
664err_dma_tx_buff:
665 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
666err_dma_rx_buff:
667 pxa_free_dma(si->txdma);
668err_tx_dma:
669 pxa_free_dma(si->rxdma);
670err_rx_dma:
671 free_irq(IRQ_ICP, dev);
672err_irq2:
673 free_irq(IRQ_STUART, dev);
674err_irq1:
675
676 return err;
677}
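
pxa_irda_start() uses the kernel's standard goto-based unwind: resources are
acquired in order and released in exactly reverse order, one label per
acquisition, so every failure point frees precisely what was obtained before
it. Reduced to a sketch with hypothetical acquire/release pairs:

	err = acquire_foo();
	if (err)
		goto err_foo;
	err = acquire_bar();
	if (err)
		goto err_bar;
	return 0;

err_bar:
	release_foo();
err_foo:
	return err;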
678
679static int pxa_irda_stop(struct net_device *dev)
680{
681 struct pxa_irda *si = netdev_priv(dev);
682
683 netif_stop_queue(dev);
684
685 pxa_irda_shutdown(si);
686
687 /* Stop IrLAP */
688 if (si->irlap) {
689 irlap_close(si->irlap);
690 si->irlap = NULL;
691 }
692
693 free_irq(IRQ_STUART, dev);
694 free_irq(IRQ_ICP, dev);
695
696 pxa_free_dma(si->rxdma);
697 pxa_free_dma(si->txdma);
698
699 	if (si->dma_rx_buff)
700 		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
701 	if (si->dma_tx_buff)
702 		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
703
704 printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
705 return 0;
706}
707
708static int pxa_irda_suspend(struct device *_dev, pm_message_t state)
709{
710 struct net_device *dev = dev_get_drvdata(_dev);
711 struct pxa_irda *si;
712
713 if (dev && netif_running(dev)) {
714 si = netdev_priv(dev);
715 netif_device_detach(dev);
716 pxa_irda_shutdown(si);
717 }
718
719 return 0;
720}
721
722static int pxa_irda_resume(struct device *_dev)
723{
724 struct net_device *dev = dev_get_drvdata(_dev);
725 struct pxa_irda *si;
726
727 if (dev && netif_running(dev)) {
728 si = netdev_priv(dev);
729 pxa_irda_startup(si);
730 netif_device_attach(dev);
731 netif_wake_queue(dev);
732 }
733
734 return 0;
735}
736
737
738static int pxa_irda_init_iobuf(iobuff_t *io, int size)
739{
740 io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
741 if (io->head != NULL) {
742 io->truesize = size;
743 io->in_frame = FALSE;
744 io->state = OUTSIDE_FRAME;
745 io->data = io->head;
746 }
747 return io->head ? 0 : -ENOMEM;
748}
749
750static int pxa_irda_probe(struct device *_dev)
751{
752 struct platform_device *pdev = to_platform_device(_dev);
753 struct net_device *dev;
754 struct pxa_irda *si;
755 unsigned int baudrate_mask;
756 int err;
757
758 if (!pdev->dev.platform_data)
759 return -ENODEV;
760
761 err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
762 if (err)
763 goto err_mem_1;
764
765 err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
766 if (err)
767 goto err_mem_2;
768
769 dev = alloc_irdadev(sizeof(struct pxa_irda));
770 if (!dev)
771 goto err_mem_3;
772
773 si = netdev_priv(dev);
774 si->dev = &pdev->dev;
775 si->pdata = pdev->dev.platform_data;
776
777 /*
778 * Initialise the SIR buffers
779 */
780 err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
781 if (err)
782 goto err_mem_4;
783 err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
784 if (err)
785 goto err_mem_5;
786
787 dev->hard_start_xmit = pxa_irda_hard_xmit;
788 dev->open = pxa_irda_start;
789 dev->stop = pxa_irda_stop;
790 dev->do_ioctl = pxa_irda_ioctl;
791 dev->get_stats = pxa_irda_stats;
792
793 irda_init_max_qos_capabilies(&si->qos);
794
795 baudrate_mask = 0;
796 if (si->pdata->transceiver_cap & IR_SIRMODE)
797 baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
798 if (si->pdata->transceiver_cap & IR_FIRMODE)
799 baudrate_mask |= IR_4000000 << 8;
800
801 si->qos.baud_rate.bits &= baudrate_mask;
802 si->qos.min_turn_time.bits = 7; /* 1ms or more */
803
804 irda_qos_bits_to_value(&si->qos);
805
806 err = register_netdev(dev);
807
808 if (err == 0)
809 dev_set_drvdata(&pdev->dev, dev);
810
811 if (err) {
812 kfree(si->tx_buff.head);
813err_mem_5:
814 kfree(si->rx_buff.head);
815err_mem_4:
816 free_netdev(dev);
817err_mem_3:
818 release_mem_region(__PREG(FICP), 0x1c);
819err_mem_2:
820 release_mem_region(__PREG(STUART), 0x24);
821 }
822err_mem_1:
823 return err;
824}
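
The error path in pxa_irda_probe() has an unusual shape: the unwind labels
live inside the "if (err)" block, and the success path falls through to
"err_mem_1: return err" with err still zero. For the final failure point it
is equivalent to the more conventional layout sketched here (same semantics,
hypothetical reordering):

	err = register_netdev(dev);
	if (err)
		goto err_register;
	dev_set_drvdata(&pdev->dev, dev);
	return 0;

err_register:
	kfree(si->tx_buff.head);
	kfree(si->rx_buff.head);
	free_netdev(dev);
	release_mem_region(__PREG(FICP), 0x1c);
	release_mem_region(__PREG(STUART), 0x24);
	return err;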
825
826static int pxa_irda_remove(struct device *_dev)
827{
828 struct net_device *dev = dev_get_drvdata(_dev);
829
830 if (dev) {
831 struct pxa_irda *si = netdev_priv(dev);
832 unregister_netdev(dev);
833 kfree(si->tx_buff.head);
834 kfree(si->rx_buff.head);
835 free_netdev(dev);
836 }
837
838 release_mem_region(__PREG(STUART), 0x24);
839 release_mem_region(__PREG(FICP), 0x1c);
840
841 return 0;
842}
843
844static struct device_driver pxa_ir_driver = {
845 .name = "pxa2xx-ir",
846 .bus = &platform_bus_type,
847 .probe = pxa_irda_probe,
848 .remove = pxa_irda_remove,
849 .suspend = pxa_irda_suspend,
850 .resume = pxa_irda_resume,
851};
852
853static int __init pxa_irda_init(void)
854{
855 return driver_register(&pxa_ir_driver);
856}
857
858static void __exit pxa_irda_exit(void)
859{
860 driver_unregister(&pxa_ir_driver);
861}
862
863module_init(pxa_irda_init);
864module_exit(pxa_irda_exit);
865
866MODULE_LICENSE("GPL");
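
For context, the driver depends on two board-supplied hooks, transceiver_cap
and transceiver_mode, reached through si->pdata. A hedged sketch of the
board-side wiring (the pxaficp_platform_data layout is inferred from the
fields used above, and the myboard_* names are hypothetical):

	static void myboard_irda_transceiver_mode(struct device *dev, int mode)
	{
		/* drive the board's transceiver GPIOs for IR_SIRMODE,
		 * IR_FIRMODE or IR_OFF as requested */
	}

	static struct pxaficp_platform_data myboard_ficp_info = {
		.transceiver_cap  = IR_SIRMODE | IR_FIRMODE,
		.transceiver_mode = myboard_irda_transceiver_mode,
	};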
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 8d34ac60d906..76e0b9fb5e96 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -29,7 +29,7 @@
29#include <linux/rtnetlink.h> 29#include <linux/rtnetlink.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/device.h> 32#include <linux/platform_device.h>
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34 34
35#include <net/irda/irda.h> 35#include <net/irda/irda.h>
@@ -291,12 +291,12 @@ static void sa1100_irda_shutdown(struct sa1100_irda *si)
291/* 291/*
292 * Suspend the IrDA interface. 292 * Suspend the IrDA interface.
293 */ 293 */
294static int sa1100_irda_suspend(struct device *_dev, pm_message_t state, u32 level) 294static int sa1100_irda_suspend(struct device *_dev, pm_message_t state)
295{ 295{
296 struct net_device *dev = dev_get_drvdata(_dev); 296 struct net_device *dev = dev_get_drvdata(_dev);
297 struct sa1100_irda *si; 297 struct sa1100_irda *si;
298 298
299 if (!dev || level != SUSPEND_DISABLE) 299 if (!dev)
300 return 0; 300 return 0;
301 301
302 si = dev->priv; 302 si = dev->priv;
@@ -316,12 +316,12 @@ static int sa1100_irda_suspend(struct device *_dev, pm_message_t state, u32 leve
316/* 316/*
317 * Resume the IrDA interface. 317 * Resume the IrDA interface.
318 */ 318 */
319static int sa1100_irda_resume(struct device *_dev, u32 level) 319static int sa1100_irda_resume(struct device *_dev)
320{ 320{
321 struct net_device *dev = dev_get_drvdata(_dev); 321 struct net_device *dev = dev_get_drvdata(_dev);
322 struct sa1100_irda *si; 322 struct sa1100_irda *si;
323 323
324 if (!dev || level != RESUME_ENABLE) 324 if (!dev)
325 return 0; 325 return 0;
326 326
327 si = dev->priv; 327 si = dev->priv;
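
The hunks above track a driver-model API change: bus-level suspend and
resume callbacks lost their u32 level argument and are now invoked exactly
once per transition instead of once per SUSPEND_*/RESUME_* phase. The new
shape, sketched with a hypothetical foo driver:

	static int foo_suspend(struct device *dev, pm_message_t state)
	{
		/* quiesce the device in a single call; no phases */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* bring the device back in a single call */
		return 0;
	}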
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index efc5a8870565..df22b8b532e7 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -490,8 +490,7 @@ static void sirdev_free_buffers(struct sir_dev *dev)
490{ 490{
491 if (dev->rx_buff.skb) 491 if (dev->rx_buff.skb)
492 kfree_skb(dev->rx_buff.skb); 492 kfree_skb(dev->rx_buff.skb);
493 if (dev->tx_buff.head) 493 kfree(dev->tx_buff.head);
494 kfree(dev->tx_buff.head);
495 dev->rx_buff.head = dev->tx_buff.head = NULL; 494 dev->rx_buff.head = dev->tx_buff.head = NULL;
496 dev->rx_buff.skb = NULL; 495 dev->rx_buff.skb = NULL;
497} 496}
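
This hunk and several later ones (vlsi_ir, ni65, rrunner, mace) drop the
same redundant guard: kfree() is defined to be a no-op when passed NULL, so
the preceding NULL check adds nothing. In sketch form:

	char *buf = NULL;
	kfree(buf);	/* safe: kfree(NULL) does nothing */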
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index dd89bda1f131..a1d207f2fa68 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -53,6 +53,7 @@
53#include <linux/rtnetlink.h> 53#include <linux/rtnetlink.h>
54#include <linux/serial_reg.h> 54#include <linux/serial_reg.h>
55#include <linux/dma-mapping.h> 55#include <linux/dma-mapping.h>
56#include <linux/platform_device.h>
56 57
57#include <asm/io.h> 58#include <asm/io.h>
58#include <asm/dma.h> 59#include <asm/dma.h>
@@ -213,8 +214,8 @@ static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base);
213 214
214/* Power Management */ 215/* Power Management */
215 216
216static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level); 217static int smsc_ircc_suspend(struct device *dev, pm_message_t state);
217static int smsc_ircc_resume(struct device *dev, u32 level); 218static int smsc_ircc_resume(struct device *dev);
218 219
219static struct device_driver smsc_ircc_driver = { 220static struct device_driver smsc_ircc_driver = {
220 .name = SMSC_IRCC2_DRIVER_NAME, 221 .name = SMSC_IRCC2_DRIVER_NAME,
@@ -638,21 +639,14 @@ static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
638 */ 639 */
639static void smsc_ircc_init_chip(struct smsc_ircc_cb *self) 640static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
640{ 641{
641 int iobase, ir_mode, ctrl, fast; 642 int iobase = self->io.fir_base;
642
643 IRDA_ASSERT(self != NULL, return;);
644
645 iobase = self->io.fir_base;
646 ir_mode = IRCC_CFGA_IRDA_SIR_A;
647 ctrl = 0;
648 fast = 0;
649 643
650 register_bank(iobase, 0); 644 register_bank(iobase, 0);
651 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER); 645 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
652 outb(0x00, iobase + IRCC_MASTER); 646 outb(0x00, iobase + IRCC_MASTER);
653 647
654 register_bank(iobase, 1); 648 register_bank(iobase, 1);
655 outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | ir_mode), 649 outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | IRCC_CFGA_IRDA_SIR_A),
656 iobase + IRCC_SCE_CFGA); 650 iobase + IRCC_SCE_CFGA);
657 651
658#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */ 652#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
@@ -666,10 +660,10 @@ static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
666 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD); 660 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD);
667 661
668 register_bank(iobase, 4); 662 register_bank(iobase, 4);
669 outb((inb(iobase + IRCC_CONTROL) & 0x30) | ctrl, iobase + IRCC_CONTROL); 663 outb((inb(iobase + IRCC_CONTROL) & 0x30), iobase + IRCC_CONTROL);
670 664
671 register_bank(iobase, 0); 665 register_bank(iobase, 0);
672 outb(fast, iobase + IRCC_LCR_A); 666 outb(0, iobase + IRCC_LCR_A);
673 667
674 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED); 668 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
675 669
@@ -1556,6 +1550,46 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1556} 1550}
1557#endif /* unused */ 1551#endif /* unused */
1558 1552
1553static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
1554{
1555 int error;
1556
1557 error = request_irq(self->io.irq, smsc_ircc_interrupt, 0,
1558 self->netdev->name, self->netdev);
1559 if (error)
1560 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
1561 __FUNCTION__, self->io.irq, error);
1562
1563 return error;
1564}
1565
1566static void smsc_ircc_start_interrupts(struct smsc_ircc_cb *self)
1567{
1568 unsigned long flags;
1569
1570 spin_lock_irqsave(&self->lock, flags);
1571
1572 self->io.speed = 0;
1573 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1574
1575 spin_unlock_irqrestore(&self->lock, flags);
1576}
1577
1578static void smsc_ircc_stop_interrupts(struct smsc_ircc_cb *self)
1579{
1580 int iobase = self->io.fir_base;
1581 unsigned long flags;
1582
1583 spin_lock_irqsave(&self->lock, flags);
1584
1585 register_bank(iobase, 0);
1586 outb(0, iobase + IRCC_IER);
1587 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
1588 outb(0x00, iobase + IRCC_MASTER);
1589
1590 spin_unlock_irqrestore(&self->lock, flags);
1591}
1592
1559 1593
1560/* 1594/*
1561 * Function smsc_ircc_net_open (dev) 1595 * Function smsc_ircc_net_open (dev)
@@ -1567,7 +1601,6 @@ static int smsc_ircc_net_open(struct net_device *dev)
1567{ 1601{
1568 struct smsc_ircc_cb *self; 1602 struct smsc_ircc_cb *self;
1569 char hwname[16]; 1603 char hwname[16];
1570 unsigned long flags;
1571 1604
1572 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1605 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1573 1606
@@ -1575,6 +1608,11 @@ static int smsc_ircc_net_open(struct net_device *dev)
1575 self = netdev_priv(dev); 1608 self = netdev_priv(dev);
1576 IRDA_ASSERT(self != NULL, return 0;); 1609 IRDA_ASSERT(self != NULL, return 0;);
1577 1610
1611 if (self->io.suspended) {
1612 IRDA_DEBUG(0, "%s(), device is suspended\n", __FUNCTION__);
1613 return -EAGAIN;
1614 }
1615
1578 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, 1616 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
1579 (void *) dev)) { 1617 (void *) dev)) {
1580 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", 1618 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
@@ -1582,11 +1620,7 @@ static int smsc_ircc_net_open(struct net_device *dev)
1582 return -EAGAIN; 1620 return -EAGAIN;
1583 } 1621 }
1584 1622
1585 spin_lock_irqsave(&self->lock, flags); 1623 smsc_ircc_start_interrupts(self);
1586 /*smsc_ircc_sir_start(self);*/
1587 self->io.speed = 0;
1588 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1589 spin_unlock_irqrestore(&self->lock, flags);
1590 1624
1591 /* Give self a hardware name */ 1625 /* Give self a hardware name */
1592 /* It would be cool to offer the chip revision here - Jean II */ 1626 /* It would be cool to offer the chip revision here - Jean II */
@@ -1639,37 +1673,63 @@ static int smsc_ircc_net_close(struct net_device *dev)
1639 irlap_close(self->irlap); 1673 irlap_close(self->irlap);
1640 self->irlap = NULL; 1674 self->irlap = NULL;
1641 1675
1642 free_irq(self->io.irq, dev); 1676 smsc_ircc_stop_interrupts(self);
1677
1678	/* if we are called from smsc_ircc_resume, the IRQ is not reserved */
1679 if (!self->io.suspended)
1680 free_irq(self->io.irq, dev);
1681
1643 disable_dma(self->io.dma); 1682 disable_dma(self->io.dma);
1644 free_dma(self->io.dma); 1683 free_dma(self->io.dma);
1645 1684
1646 return 0; 1685 return 0;
1647} 1686}
1648 1687
1649static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level) 1688static int smsc_ircc_suspend(struct device *dev, pm_message_t state)
1650{ 1689{
1651 struct smsc_ircc_cb *self = dev_get_drvdata(dev); 1690 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1652 1691
1653 IRDA_MESSAGE("%s, Suspending\n", driver_name); 1692 if (!self->io.suspended) {
1693 IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
1654 1694
1655 if (level == SUSPEND_DISABLE && !self->io.suspended) { 1695 rtnl_lock();
1656 smsc_ircc_net_close(self->netdev); 1696 if (netif_running(self->netdev)) {
1697 netif_device_detach(self->netdev);
1698 smsc_ircc_stop_interrupts(self);
1699 free_irq(self->io.irq, self->netdev);
1700 disable_dma(self->io.dma);
1701 }
1657 self->io.suspended = 1; 1702 self->io.suspended = 1;
1703 rtnl_unlock();
1658 } 1704 }
1659 1705
1660 return 0; 1706 return 0;
1661} 1707}
1662 1708
1663static int smsc_ircc_resume(struct device *dev, u32 level) 1709static int smsc_ircc_resume(struct device *dev)
1664{ 1710{
1665 struct smsc_ircc_cb *self = dev_get_drvdata(dev); 1711 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1666 1712
1667 if (level == RESUME_ENABLE && self->io.suspended) { 1713 if (self->io.suspended) {
1668 1714 IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
1669 smsc_ircc_net_open(self->netdev); 1715
1716 rtnl_lock();
1717 smsc_ircc_init_chip(self);
1718 if (netif_running(self->netdev)) {
1719 if (smsc_ircc_request_irq(self)) {
1720 /*
1721 * Don't fail resume process, just kill this
1722 * network interface
1723 */
1724 unregister_netdevice(self->netdev);
1725 } else {
1726 enable_dma(self->io.dma);
1727 smsc_ircc_start_interrupts(self);
1728 netif_device_attach(self->netdev);
1729 }
1730 }
1670 self->io.suspended = 0; 1731 self->io.suspended = 0;
1671 1732 rtnl_unlock();
1672 IRDA_MESSAGE("%s, Waking up\n", driver_name);
1673 } 1733 }
1674 return 0; 1734 return 0;
1675} 1735}
@@ -1682,9 +1742,6 @@ static int smsc_ircc_resume(struct device *dev, u32 level)
1682 */ 1742 */
1683static int __exit smsc_ircc_close(struct smsc_ircc_cb *self) 1743static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1684{ 1744{
1685 int iobase;
1686 unsigned long flags;
1687
1688 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1745 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1689 1746
1690 IRDA_ASSERT(self != NULL, return -1;); 1747 IRDA_ASSERT(self != NULL, return -1;);
@@ -1694,22 +1751,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1694 /* Remove netdevice */ 1751 /* Remove netdevice */
1695 unregister_netdev(self->netdev); 1752 unregister_netdev(self->netdev);
1696 1753
1697 /* Make sure the irq handler is not exectuting */ 1754 smsc_ircc_stop_interrupts(self);
1698 spin_lock_irqsave(&self->lock, flags);
1699
1700 /* Stop interrupts */
1701 iobase = self->io.fir_base;
1702 register_bank(iobase, 0);
1703 outb(0, iobase + IRCC_IER);
1704 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
1705 outb(0x00, iobase + IRCC_MASTER);
1706#if 0
1707 /* Reset to SIR mode */
1708 register_bank(iobase, 1);
1709 outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase + IRCC_SCE_CFGA);
1710 outb(IRCC_CFGB_IR, iobase + IRCC_SCE_CFGB);
1711#endif
1712 spin_unlock_irqrestore(&self->lock, flags);
1713 1755
1714 /* Release the PORTS that this driver is using */ 1756 /* Release the PORTS that this driver is using */
1715 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1757 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
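
Taken together, the smsc-ircc2 changes replace a heavyweight close-on-suspend
with the now-common detach pattern: a running interface is quiesced under
rtnl_lock() so the transition cannot race with open or close, then reattached
on resume. A condensed sketch (ndev stands in for the driver's net_device
pointer):

	rtnl_lock();
	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		/* stop interrupts, free the IRQ, disable DMA here */
	}
	rtnl_unlock();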
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 651c5a6578fd..a9f49f058cfb 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -473,8 +473,7 @@ static int vlsi_free_ring(struct vlsi_ring *r)
473 rd_set_addr_status(rd, 0, 0); 473 rd_set_addr_status(rd, 0, 0);
474 if (busaddr) 474 if (busaddr)
475 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 475 pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
476 if (rd->buf) 476 kfree(rd->buf);
477 kfree(rd->buf);
478 } 477 }
479 kfree(r); 478 kfree(r);
480 return 0; 479 return 0;
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 3d56cf5a4e23..d86d8f055a6c 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -70,13 +70,14 @@
70#include <linux/delay.h> 70#include <linux/delay.h>
71#include <linux/mm.h> 71#include <linux/mm.h>
72#include <linux/ethtool.h> 72#include <linux/ethtool.h>
73#include <asm/iSeries/mf.h> 73
74#include <asm/iSeries/iSeries_pci.h> 74#include <asm/abs_addr.h>
75#include <asm/iseries/mf.h>
75#include <asm/uaccess.h> 76#include <asm/uaccess.h>
76 77
77#include <asm/iSeries/HvLpConfig.h> 78#include <asm/iseries/hv_lp_config.h>
78#include <asm/iSeries/HvTypes.h> 79#include <asm/iseries/hv_types.h>
79#include <asm/iSeries/HvLpEvent.h> 80#include <asm/iseries/hv_lp_event.h>
80#include <asm/iommu.h> 81#include <asm/iommu.h>
81#include <asm/vio.h> 82#include <asm/vio.h>
82 83
@@ -1397,13 +1398,13 @@ static inline void veth_build_dma_list(struct dma_chunk *list,
1397 * it just at the granularity of iSeries real->absolute 1398 * it just at the granularity of iSeries real->absolute
1398 * mapping? Indeed, given the way the allocator works, can we 1399 * mapping? Indeed, given the way the allocator works, can we
1399 * count on them being absolutely contiguous? */ 1400 * count on them being absolutely contiguous? */
1400 list[0].addr = ISERIES_HV_ADDR(p); 1401 list[0].addr = iseries_hv_addr(p);
1401 list[0].size = min(length, 1402 list[0].size = min(length,
1402 PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK)); 1403 PAGE_SIZE - ((unsigned long)p & ~PAGE_MASK));
1403 1404
1404 done = list[0].size; 1405 done = list[0].size;
1405 while (done < length) { 1406 while (done < length) {
1406 list[i].addr = ISERIES_HV_ADDR(p + done); 1407 list[i].addr = iseries_hv_addr(p + done);
1407 list[i].size = min(length-done, PAGE_SIZE); 1408 list[i].size = min(length-done, PAGE_SIZE);
1408 done += list[i].size; 1409 done += list[i].size;
1409 i++; 1410 i++;
@@ -1496,8 +1497,8 @@ static void veth_receive(struct veth_lpar_connection *cnx,
1496 cnx->dst_inst, 1497 cnx->dst_inst,
1497 HvLpDma_AddressType_RealAddress, 1498 HvLpDma_AddressType_RealAddress,
1498 HvLpDma_AddressType_TceIndex, 1499 HvLpDma_AddressType_TceIndex,
1499 ISERIES_HV_ADDR(&local_list), 1500 iseries_hv_addr(&local_list),
1500 ISERIES_HV_ADDR(&remote_list), 1501 iseries_hv_addr(&remote_list),
1501 length); 1502 length);
1502 if (rc != HvLpDma_Rc_Good) { 1503 if (rc != HvLpDma_Rc_Good) {
1503 dev_kfree_skb_irq(skb); 1504 dev_kfree_skb_irq(skb);
@@ -1647,10 +1648,13 @@ static struct vio_device_id veth_device_table[] __devinitdata = {
1647MODULE_DEVICE_TABLE(vio, veth_device_table); 1648MODULE_DEVICE_TABLE(vio, veth_device_table);
1648 1649
1649static struct vio_driver veth_driver = { 1650static struct vio_driver veth_driver = {
1650 .name = DRV_NAME,
1651 .id_table = veth_device_table, 1651 .id_table = veth_device_table,
1652 .probe = veth_probe, 1652 .probe = veth_probe,
1653 .remove = veth_remove 1653 .remove = veth_remove,
1654 .driver = {
1655 .name = DRV_NAME,
1656 .owner = THIS_MODULE,
1657 }
1654}; 1658};
1655 1659
1656/* 1660/*
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 8423cb6875f0..a74a5cfaf5bc 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -33,7 +33,7 @@
33#include <linux/netdevice.h> 33#include <linux/netdevice.h>
34#include <linux/etherdevice.h> 34#include <linux/etherdevice.h>
35#include <linux/skbuff.h> 35#include <linux/skbuff.h>
36#include <linux/device.h> 36#include <linux/platform_device.h>
37#include <linux/dma-mapping.h> 37#include <linux/dma-mapping.h>
38 38
39#include <asm/bootinfo.h> 39#include <asm/bootinfo.h>
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index 41bad07ac1ac..f7b7238d8352 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -415,6 +415,10 @@ static int rx_ring_size = RX_RING_SIZE;
415static int ticks_limit = 100; 415static int ticks_limit = 100;
416static int max_cmd_backlog = TX_RING_SIZE-1; 416static int max_cmd_backlog = TX_RING_SIZE-1;
417 417
418#ifdef CONFIG_NET_POLL_CONTROLLER
419static void i596_poll_controller(struct net_device *dev);
420#endif
421
418 422
419static inline void CA(struct net_device *dev) 423static inline void CA(struct net_device *dev)
420{ 424{
@@ -636,11 +640,11 @@ static int init_i596_mem(struct net_device *dev)
636 640
637 disable_irq(dev->irq); /* disable IRQs from LAN */ 641 disable_irq(dev->irq); /* disable IRQs from LAN */
638 DEB(DEB_INIT, 642 DEB(DEB_INIT,
639 printk("RESET 82596 port: %p (with IRQ %d disabled)\n", 643 printk("RESET 82596 port: %lx (with IRQ %d disabled)\n",
640 (void*)(dev->base_addr + PA_I82596_RESET), 644 (dev->base_addr + PA_I82596_RESET),
641 dev->irq)); 645 dev->irq));
642 646
643 gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */ 647 gsc_writel(0, (dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
644 udelay(100); /* Wait 100us - seems to help */ 648 udelay(100); /* Wait 100us - seems to help */
645 649
646 /* change the scp address */ 650 /* change the scp address */
@@ -1209,6 +1213,9 @@ static int __devinit i82596_probe(struct net_device *dev,
1209 dev->set_multicast_list = set_multicast_list; 1213 dev->set_multicast_list = set_multicast_list;
1210 dev->tx_timeout = i596_tx_timeout; 1214 dev->tx_timeout = i596_tx_timeout;
1211 dev->watchdog_timeo = TX_TIMEOUT; 1215 dev->watchdog_timeo = TX_TIMEOUT;
1216#ifdef CONFIG_NET_POLL_CONTROLLER
1217 dev->poll_controller = i596_poll_controller;
1218#endif
1212 1219
1213 dev->priv = (void *)(dev->mem_start); 1220 dev->priv = (void *)(dev->mem_start);
1214 1221
@@ -1242,6 +1249,14 @@ static int __devinit i82596_probe(struct net_device *dev,
1242 return 0; 1249 return 0;
1243} 1250}
1244 1251
1252#ifdef CONFIG_NET_POLL_CONTROLLER
1253static void i596_poll_controller(struct net_device *dev)
1254{
1255 disable_irq(dev->irq);
1256 i596_interrupt(dev->irq, dev, NULL);
1257 enable_irq(dev->irq);
1258}
1259#endif
1245 1260
1246static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs) 1261static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1247{ 1262{
@@ -1528,17 +1543,18 @@ lan_init_chip(struct parisc_device *dev)
1528 1543
1529 if (!dev->irq) { 1544 if (!dev->irq) {
1530 printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", 1545 printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
1531 __FILE__, dev->hpa); 1546 __FILE__, dev->hpa.start);
1532 return -ENODEV; 1547 return -ENODEV;
1533 } 1548 }
1534 1549
1535 printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq); 1550 printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start,
1551 dev->irq);
1536 1552
1537 netdevice = alloc_etherdev(0); 1553 netdevice = alloc_etherdev(0);
1538 if (!netdevice) 1554 if (!netdevice)
1539 return -ENOMEM; 1555 return -ENOMEM;
1540 1556
1541 netdevice->base_addr = dev->hpa; 1557 netdevice->base_addr = dev->hpa.start;
1542 netdevice->irq = dev->irq; 1558 netdevice->irq = dev->irq;
1543 1559
1544 retval = i82596_probe(netdevice, &dev->dev); 1560 retval = i82596_probe(netdevice, &dev->dev);
@@ -1566,7 +1582,7 @@ static struct parisc_device_id lan_tbl[] = {
1566MODULE_DEVICE_TABLE(parisc, lan_tbl); 1582MODULE_DEVICE_TABLE(parisc, lan_tbl);
1567 1583
1568static struct parisc_driver lan_driver = { 1584static struct parisc_driver lan_driver = {
1569 .name = "Apricot", 1585 .name = "lasi_82596",
1570 .id_table = lan_tbl, 1586 .id_table = lan_tbl,
1571 .probe = lan_init_chip, 1587 .probe = lan_init_chip,
1572}; 1588};
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 81d0a26e4f41..2a5add257b8f 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -1016,6 +1016,7 @@ static struct of_device_id mace_match[] =
1016 }, 1016 },
1017 {}, 1017 {},
1018}; 1018};
1019MODULE_DEVICE_TABLE (of, mace_match);
1019 1020
1020static struct macio_driver mace_driver = 1021static struct macio_driver mace_driver =
1021{ 1022{
@@ -1035,10 +1036,8 @@ static void __exit mace_cleanup(void)
1035{ 1036{
1036 macio_unregister_driver(&mace_driver); 1037 macio_unregister_driver(&mace_driver);
1037 1038
1038 if (dummy_buf) { 1039 kfree(dummy_buf);
1039 kfree(dummy_buf); 1040 dummy_buf = NULL;
1040 dummy_buf = NULL;
1041 }
1042} 1041}
1043 1042
1044MODULE_AUTHOR("Paul Mackerras"); 1043MODULE_AUTHOR("Paul Mackerras");
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index 405e18365ede..e9c999d7eb39 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -47,7 +47,7 @@
47#include <linux/netdevice.h> 47#include <linux/netdevice.h>
48#include <linux/etherdevice.h> 48#include <linux/etherdevice.h>
49#include <linux/skbuff.h> 49#include <linux/skbuff.h>
50#include <linux/device.h> 50#include <linux/platform_device.h>
51#include <linux/dma-mapping.h> 51#include <linux/dma-mapping.h>
52 52
53#include <asm/bootinfo.h> 53#include <asm/bootinfo.h>
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index f79f7ee72ab8..bbffb585b3b3 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -13,6 +13,7 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/etherdevice.h> 14#include <linux/etherdevice.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/platform_device.h>
16#include <asm/io.h> 17#include <asm/io.h>
17#include <asm/mips-boards/simint.h> 18#include <asm/mips-boards/simint.h>
18 19
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 25c9a99c377b..71f2c6705bc3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -39,6 +39,8 @@
39#include <linux/bitops.h> 39#include <linux/bitops.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <linux/ethtool.h> 41#include <linux/ethtool.h>
42#include <linux/platform_device.h>
43
42#include <asm/io.h> 44#include <asm/io.h>
43#include <asm/types.h> 45#include <asm/types.h>
44#include <asm/pgtable.h> 46#include <asm/pgtable.h>
@@ -1533,6 +1535,9 @@ static int mv643xx_eth_probe(struct device *ddev)
1533 printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name); 1535 printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name);
1534#endif 1536#endif
1535 1537
1538 if (mp->tx_sram_size > 0)
1539 printk(KERN_NOTICE "%s: Using SRAM\n", dev->name);
1540
1536 return 0; 1541 return 0;
1537 1542
1538out: 1543out:
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index e531a4eedfee..d11821dd86ed 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -675,7 +675,6 @@ static int ne2k_pci_resume (struct pci_dev *pdev)
675 pci_set_power_state(pdev, 0); 675 pci_set_power_state(pdev, 0);
676 pci_restore_state(pdev); 676 pci_restore_state(pdev);
677 pci_enable_device(pdev); 677 pci_enable_device(pdev);
678 pci_set_master(pdev);
679 NS8390_init(dev, 1); 678 NS8390_init(dev, 1);
680 netif_device_attach(dev); 679 netif_device_attach(dev);
681 680
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 925d1dfcc4dc..bb42ff218484 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -696,8 +696,7 @@ static void ni65_free_buffer(struct priv *p)
696 return; 696 return;
697 697
698 for(i=0;i<TMDNUM;i++) { 698 for(i=0;i<TMDNUM;i++) {
699 if(p->tmdbounce[i]) 699 kfree(p->tmdbounce[i]);
700 kfree(p->tmdbounce[i]);
701#ifdef XMT_VIA_SKB 700#ifdef XMT_VIA_SKB
702 if(p->tmd_skb[i]) 701 if(p->tmd_skb[i])
703 dev_kfree_skb(p->tmd_skb[i]); 702 dev_kfree_skb(p->tmd_skb[i]);
@@ -710,12 +709,10 @@ static void ni65_free_buffer(struct priv *p)
710 if(p->recv_skb[i]) 709 if(p->recv_skb[i])
711 dev_kfree_skb(p->recv_skb[i]); 710 dev_kfree_skb(p->recv_skb[i]);
712#else 711#else
713 if(p->recvbounce[i]) 712 kfree(p->recvbounce[i]);
714 kfree(p->recvbounce[i]);
715#endif 713#endif
716 } 714 }
717 if(p->self) 715 kfree(p->self);
718 kfree(p->self);
719} 716}
720 717
721 718
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 9f22d138e3ad..818c185d6438 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1020,6 +1020,12 @@ static void set_misc_reg(struct net_device *dev)
1020 } else { 1020 } else {
1021 outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG); 1021 outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG);
1022 } 1022 }
1023 } else if (info->flags & IS_DL10019) {
1024 /* Advertise 100F, 100H, 10F, 10H */
1025 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
1026 /* Restart MII autonegotiation */
1027 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
1028 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
1023 } 1029 }
1024} 1030}
1025 1031
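
The magic numbers in the DL10019 hunk decode as standard MII fields:
register 4 is the autonegotiation advertisement register, where
0x01e1 = 0x0100 | 0x0080 | 0x0040 | 0x0020 | 0x0001 advertises 100BASE-TX
full/half and 10BASE-T full/half plus the IEEE 802.3 selector; register 0 is
the basic mode control register, where 0x1200 = 0x1000 | 0x0200 enables and
restarts autonegotiation. The same writes spelled out with the <linux/mii.h>
names (a sketch; the driver itself uses the raw constants):

	mdio_write(nic_base + DLINK_GPIO, info->eth_phy, MII_ADVERTISE,
		   ADVERTISE_100FULL | ADVERTISE_100HALF |
		   ADVERTISE_10FULL | ADVERTISE_10HALF | ADVERTISE_CSMA);
	mdio_write(nic_base + DLINK_GPIO, info->eth_phy, MII_BMCR, 0);
	mdio_write(nic_base + DLINK_GPIO, info->eth_phy, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART);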
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 70fe81a89df9..be319229f543 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,8 +22,8 @@
22 *************************************************************************/ 22 *************************************************************************/
23 23
24#define DRV_NAME "pcnet32" 24#define DRV_NAME "pcnet32"
25#define DRV_VERSION "1.31a" 25#define DRV_VERSION "1.31c"
26#define DRV_RELDATE "12.Sep.2005" 26#define DRV_RELDATE "01.Nov.2005"
27#define PFX DRV_NAME ": " 27#define PFX DRV_NAME ": "
28 28
29static const char *version = 29static const char *version =
@@ -260,6 +260,11 @@ static int homepna[MAX_UNITS];
260 * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam(). 260 * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
261 * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4 261 * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
262 * to allow loopback test to work unchanged. 262 * to allow loopback test to work unchanged.
263 * v1.31b 06 Oct 2005 Don Fry changed alloc_ring to show name of device
264 * if allocation fails
265 * v1.31c 01 Nov 2005 Don Fry Allied Telesyn 2700/2701 FX are 100Mbit only.
266 * Force 100Mbit FD if Auto (ASEL) is selected.
267 * See Bugzilla 2669 and 4551.
263 */ 268 */
264 269
265 270
@@ -408,7 +413,7 @@ static int pcnet32_get_regs_len(struct net_device *dev);
408static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 413static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
409 void *ptr); 414 void *ptr);
410static void pcnet32_purge_tx_ring(struct net_device *dev); 415static void pcnet32_purge_tx_ring(struct net_device *dev);
411static int pcnet32_alloc_ring(struct net_device *dev); 416static int pcnet32_alloc_ring(struct net_device *dev, char *name);
412static void pcnet32_free_ring(struct net_device *dev); 417static void pcnet32_free_ring(struct net_device *dev);
413 418
414 419
@@ -669,15 +674,17 @@ static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringpara
669 lp->rx_mod_mask = lp->rx_ring_size - 1; 674 lp->rx_mod_mask = lp->rx_ring_size - 1;
670 lp->rx_len_bits = (i << 4); 675 lp->rx_len_bits = (i << 4);
671 676
672 if (pcnet32_alloc_ring(dev)) { 677 if (pcnet32_alloc_ring(dev, dev->name)) {
673 pcnet32_free_ring(dev); 678 pcnet32_free_ring(dev);
679 spin_unlock_irqrestore(&lp->lock, flags);
674 return -ENOMEM; 680 return -ENOMEM;
675 } 681 }
676 682
677 spin_unlock_irqrestore(&lp->lock, flags); 683 spin_unlock_irqrestore(&lp->lock, flags);
678 684
679 if (pcnet32_debug & NETIF_MSG_DRV) 685 if (pcnet32_debug & NETIF_MSG_DRV)
680 printk(KERN_INFO PFX "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size); 686 printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n",
687 dev->name, lp->rx_ring_size, lp->tx_ring_size);
681 688
682 if (netif_running(dev)) 689 if (netif_running(dev))
683 pcnet32_open(dev); 690 pcnet32_open(dev);
@@ -981,7 +988,11 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
981 *buff++ = a->read_csr(ioaddr, 114); 988 *buff++ = a->read_csr(ioaddr, 114);
982 989
983 /* read bus configuration registers */ 990 /* read bus configuration registers */
984 for (i=0; i<36; i++) { 991 for (i=0; i<30; i++) {
992 *buff++ = a->read_bcr(ioaddr, i);
993 }
994 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
995 for (i=31; i<36; i++) {
985 *buff++ = a->read_bcr(ioaddr, i); 996 *buff++ = a->read_bcr(ioaddr, i);
986 } 997 }
987 998
@@ -1340,7 +1351,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1340 } 1351 }
1341 lp->a = *a; 1352 lp->a = *a;
1342 1353
1343 if (pcnet32_alloc_ring(dev)) { 1354 /* prior to register_netdev, dev->name is not yet correct */
1355 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1344 ret = -ENOMEM; 1356 ret = -ENOMEM;
1345 goto err_free_ring; 1357 goto err_free_ring;
1346 } 1358 }
@@ -1448,48 +1460,63 @@ err_release_region:
1448} 1460}
1449 1461
1450 1462
1451static int pcnet32_alloc_ring(struct net_device *dev) 1463/* if any allocation fails, caller must also call pcnet32_free_ring */
1464static int pcnet32_alloc_ring(struct net_device *dev, char *name)
1452{ 1465{
1453 struct pcnet32_private *lp = dev->priv; 1466 struct pcnet32_private *lp = dev->priv;
1454 1467
1455 if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, 1468 lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
1456 &lp->tx_ring_dma_addr)) == NULL) { 1469 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
1470 &lp->tx_ring_dma_addr);
1471 if (lp->tx_ring == NULL) {
1457 if (pcnet32_debug & NETIF_MSG_DRV) 1472 if (pcnet32_debug & NETIF_MSG_DRV)
1458 printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); 1473 printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
1474 name);
1459 return -ENOMEM; 1475 return -ENOMEM;
1460 } 1476 }
1461 1477
1462 if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, 1478 lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
1463 &lp->rx_ring_dma_addr)) == NULL) { 1479 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
1480 &lp->rx_ring_dma_addr);
1481 if (lp->rx_ring == NULL) {
1464 if (pcnet32_debug & NETIF_MSG_DRV) 1482 if (pcnet32_debug & NETIF_MSG_DRV)
1465 printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); 1483 printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n",
1484 name);
1466 return -ENOMEM; 1485 return -ENOMEM;
1467 } 1486 }
1468 1487
1469 if (!(lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, GFP_ATOMIC))) { 1488 lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size,
1489 GFP_ATOMIC);
1490 if (!lp->tx_dma_addr) {
1470 if (pcnet32_debug & NETIF_MSG_DRV) 1491 if (pcnet32_debug & NETIF_MSG_DRV)
1471 printk(KERN_ERR PFX "Memory allocation failed.\n"); 1492 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1472 return -ENOMEM; 1493 return -ENOMEM;
1473 } 1494 }
1474 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); 1495 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
1475 1496
1476 if (!(lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, GFP_ATOMIC))) { 1497 lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size,
1498 GFP_ATOMIC);
1499 if (!lp->rx_dma_addr) {
1477 if (pcnet32_debug & NETIF_MSG_DRV) 1500 if (pcnet32_debug & NETIF_MSG_DRV)
1478 printk(KERN_ERR PFX "Memory allocation failed.\n"); 1501 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1479 return -ENOMEM; 1502 return -ENOMEM;
1480 } 1503 }
1481 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); 1504 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
1482 1505
1483 if (!(lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, GFP_ATOMIC))) { 1506 lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size,
1507 GFP_ATOMIC);
1508 if (!lp->tx_skbuff) {
1484 if (pcnet32_debug & NETIF_MSG_DRV) 1509 if (pcnet32_debug & NETIF_MSG_DRV)
1485 printk(KERN_ERR PFX "Memory allocation failed.\n"); 1510 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1486 return -ENOMEM; 1511 return -ENOMEM;
1487 } 1512 }
1488 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); 1513 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
1489 1514
1490 if (!(lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, GFP_ATOMIC))) { 1515 lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size,
1516 GFP_ATOMIC);
1517 if (!lp->rx_skbuff) {
1491 if (pcnet32_debug & NETIF_MSG_DRV) 1518 if (pcnet32_debug & NETIF_MSG_DRV)
1492 printk(KERN_ERR PFX "Memory allocation failed.\n"); 1519 printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name);
1493 return -ENOMEM; 1520 return -ENOMEM;
1494 } 1521 }
1495 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); 1522 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
@@ -1592,12 +1619,18 @@ pcnet32_open(struct net_device *dev)
1592 val |= 0x10; 1619 val |= 0x10;
1593 lp->a.write_csr (ioaddr, 124, val); 1620 lp->a.write_csr (ioaddr, 124, val);
1594 1621
1595 /* Allied Telesyn AT 2700/2701 FX looses the link, so skip that */ 1622 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
1596 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && 1623 if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
1597 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || 1624 (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
1598 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { 1625 lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
1599 printk(KERN_DEBUG "%s: Skipping PHY selection.\n", dev->name); 1626 if (lp->options & PCNET32_PORT_ASEL) {
1600 } else { 1627 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
1628 if (netif_msg_link(lp))
1629 printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n",
1630 dev->name);
1631 }
1632 }
1633 {
1601 /* 1634 /*
1602 * 24 Jun 2004 according AMD, in order to change the PHY, 1635 * 24 Jun 2004 according AMD, in order to change the PHY,
1603 * DANAS (or DISPM for 79C976) must be set; then select the speed, 1636 * DANAS (or DISPM for 79C976) must be set; then select the speed,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 90630672703d..5eab9c42a111 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -61,6 +61,9 @@ int mdiobus_register(struct mii_bus *bus)
61 for (i = 0; i < PHY_MAX_ADDR; i++) { 61 for (i = 0; i < PHY_MAX_ADDR; i++) {
62 struct phy_device *phydev; 62 struct phy_device *phydev;
63 63
64 if (bus->phy_mask & (1 << i))
65 continue;
66
64 phydev = get_phy_device(bus, i); 67 phydev = get_phy_device(bus, i);
65 68
66 if (IS_ERR(phydev)) 69 if (IS_ERR(phydev))
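
The new phy_mask check gives platforms a way to exclude addresses from
probing: a set bit means that address is skipped. For example, a board with a
single PHY at address 1 could mask everything else before mdiobus_register():

	bus->phy_mask = ~(1 << 1);	/* probe MII address 1 only */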
@@ -133,13 +136,9 @@ static int mdio_bus_suspend(struct device * dev, pm_message_t state)
133 int ret = 0; 136 int ret = 0;
134 struct device_driver *drv = dev->driver; 137 struct device_driver *drv = dev->driver;
135 138
136 if (drv && drv->suspend) { 139 if (drv && drv->suspend)
137 ret = drv->suspend(dev, state, SUSPEND_DISABLE); 140 ret = drv->suspend(dev, state);
138 if (ret == 0) 141
139 ret = drv->suspend(dev, state, SUSPEND_SAVE_STATE);
140 if (ret == 0)
141 ret = drv->suspend(dev, state, SUSPEND_POWER_DOWN);
142 }
143 return ret; 142 return ret;
144} 143}
145 144
@@ -148,13 +147,9 @@ static int mdio_bus_resume(struct device * dev)
148 int ret = 0; 147 int ret = 0;
149 struct device_driver *drv = dev->driver; 148 struct device_driver *drv = dev->driver;
150 149
151 if (drv && drv->resume) { 150 if (drv && drv->resume)
152 ret = drv->resume(dev, RESUME_POWER_ON); 151 ret = drv->resume(dev);
153 if (ret == 0) 152
154 ret = drv->resume(dev, RESUME_RESTORE_STATE);
155 if (ret == 0)
156 ret = drv->resume(dev, RESUME_ENABLE);
157 }
158 return ret; 153 return ret;
159} 154}
160 155
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0df7e92b0bf8..d3c9958b00d0 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -863,7 +863,7 @@ static int __init ppp_init(void)
863 err = PTR_ERR(ppp_class); 863 err = PTR_ERR(ppp_class);
864 goto out_chrdev; 864 goto out_chrdev;
865 } 865 }
866 class_device_create(ppp_class, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); 866 class_device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
867 err = devfs_mk_cdev(MKDEV(PPP_MAJOR, 0), 867 err = devfs_mk_cdev(MKDEV(PPP_MAJOR, 0),
868 S_IFCHR|S_IRUSR|S_IWUSR, "ppp"); 868 S_IFCHR|S_IRUSR|S_IWUSR, "ppp");
869 if (err) 869 if (err)
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index ec1a18d189a1..19c2df9c86fe 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1710,10 +1710,8 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1710 error = -EFAULT; 1710 error = -EFAULT;
1711 } 1711 }
1712 wf_out: 1712 wf_out:
1713 if (oldimage) 1713 kfree(oldimage);
1714 kfree(oldimage); 1714 kfree(image);
1715 if (image)
1716 kfree(image);
1717 return error; 1715 return error;
1718 1716
1719 case SIOCRRID: 1717 case SIOCRRID:
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index d303d162974f..9c4935407f26 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -30,6 +30,8 @@
30 * in the driver. 30 * in the driver.
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This 31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8. 32 * is also an array of size 8.
33 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
34 * values are 1, 2 and 3.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver. 35
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of 36 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO. 37 * Tx descriptors that can be associated with each corresponding FIFO.
@@ -65,12 +67,15 @@
65#include "s2io.h" 67#include "s2io.h"
66#include "s2io-regs.h" 68#include "s2io-regs.h"
67 69
68#define DRV_VERSION "Version 2.0.9.1" 70#define DRV_VERSION "Version 2.0.9.3"
69 71
70/* S2io Driver name & version. */ 72/* S2io Driver name & version. */
71static char s2io_driver_name[] = "Neterion"; 73static char s2io_driver_name[] = "Neterion";
72static char s2io_driver_version[] = DRV_VERSION; 74static char s2io_driver_version[] = DRV_VERSION;
73 75
76int rxd_size[4] = {32,48,48,64};
77int rxd_count[4] = {127,85,85,63};
78
74static inline int RXD_IS_UP2DT(RxD_t *rxdp) 79static inline int RXD_IS_UP2DT(RxD_t *rxdp)
75{ 80{
76 int ret; 81 int ret;
@@ -104,7 +109,7 @@ static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
104 mac_control = &sp->mac_control; 109 mac_control = &sp->mac_control;
105 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) { 110 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
106 level = LOW; 111 level = LOW;
107 if (rxb_size <= MAX_RXDS_PER_BLOCK) { 112 if (rxb_size <= rxd_count[sp->rxd_mode]) {
108 level = PANIC; 113 level = PANIC;
109 } 114 }
110 } 115 }
@@ -296,6 +301,7 @@ static unsigned int rx_ring_sz[MAX_RX_RINGS] =
296 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 301 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297static unsigned int rts_frm_len[MAX_RX_RINGS] = 302static unsigned int rts_frm_len[MAX_RX_RINGS] =
298 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 303 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
304static unsigned int rx_ring_mode = 1;
299static unsigned int use_continuous_tx_intrs = 1; 305static unsigned int use_continuous_tx_intrs = 1;
300static unsigned int rmac_pause_time = 65535; 306static unsigned int rmac_pause_time = 65535;
301static unsigned int mc_pause_threshold_q0q3 = 187; 307static unsigned int mc_pause_threshold_q0q3 = 187;
@@ -304,6 +310,7 @@ static unsigned int shared_splits;
304static unsigned int tmac_util_period = 5; 310static unsigned int tmac_util_period = 5;
305static unsigned int rmac_util_period = 5; 311static unsigned int rmac_util_period = 5;
306static unsigned int bimodal = 0; 312static unsigned int bimodal = 0;
313static unsigned int l3l4hdr_size = 128;
307#ifndef CONFIG_S2IO_NAPI 314#ifndef CONFIG_S2IO_NAPI
308static unsigned int indicate_max_pkts; 315static unsigned int indicate_max_pkts;
309#endif 316#endif
@@ -357,10 +364,8 @@ static int init_shared_mem(struct s2io_nic *nic)
357 int i, j, blk_cnt, rx_sz, tx_sz; 364 int i, j, blk_cnt, rx_sz, tx_sz;
358 int lst_size, lst_per_page; 365 int lst_size, lst_per_page;
359 struct net_device *dev = nic->dev; 366 struct net_device *dev = nic->dev;
360#ifdef CONFIG_2BUFF_MODE
361 unsigned long tmp; 367 unsigned long tmp;
362 buffAdd_t *ba; 368 buffAdd_t *ba;
363#endif
364 369
365 mac_info_t *mac_control; 370 mac_info_t *mac_control;
366 struct config_param *config; 371 struct config_param *config;
@@ -458,7 +463,8 @@ static int init_shared_mem(struct s2io_nic *nic)
458 /* Allocation and initialization of RXDs in Rings */ 463 /* Allocation and initialization of RXDs in Rings */
459 size = 0; 464 size = 0;
460 for (i = 0; i < config->rx_ring_num; i++) { 465 for (i = 0; i < config->rx_ring_num; i++) {
461 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) { 466 if (config->rx_cfg[i].num_rxd %
467 (rxd_count[nic->rxd_mode] + 1)) {
462 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name); 468 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
463 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", 469 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
464 i); 470 i);
@@ -467,11 +473,15 @@ static int init_shared_mem(struct s2io_nic *nic)
467 } 473 }
468 size += config->rx_cfg[i].num_rxd; 474 size += config->rx_cfg[i].num_rxd;
469 mac_control->rings[i].block_count = 475 mac_control->rings[i].block_count =
470 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 476 config->rx_cfg[i].num_rxd /
471 mac_control->rings[i].pkt_cnt = 477 (rxd_count[nic->rxd_mode] + 1 );
472 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count; 478 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
479 mac_control->rings[i].block_count;
473 } 480 }
474 size = (size * (sizeof(RxD_t))); 481 if (nic->rxd_mode == RXD_MODE_1)
482 size = (size * (sizeof(RxD1_t)));
483 else
484 size = (size * (sizeof(RxD3_t)));
475 rx_sz = size; 485 rx_sz = size;
476 486
477 for (i = 0; i < config->rx_ring_num; i++) { 487 for (i = 0; i < config->rx_ring_num; i++) {
@@ -486,15 +496,15 @@ static int init_shared_mem(struct s2io_nic *nic)
486 mac_control->rings[i].nic = nic; 496 mac_control->rings[i].nic = nic;
487 mac_control->rings[i].ring_no = i; 497 mac_control->rings[i].ring_no = i;
488 498
489 blk_cnt = 499 blk_cnt = config->rx_cfg[i].num_rxd /
490 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 500 (rxd_count[nic->rxd_mode] + 1);
491 /* Allocating all the Rx blocks */ 501 /* Allocating all the Rx blocks */
492 for (j = 0; j < blk_cnt; j++) { 502 for (j = 0; j < blk_cnt; j++) {
493#ifndef CONFIG_2BUFF_MODE 503 rx_block_info_t *rx_blocks;
494 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t)); 504 int l;
495#else 505
496 size = SIZE_OF_BLOCK; 506 rx_blocks = &mac_control->rings[i].rx_blocks[j];
497#endif 507 size = SIZE_OF_BLOCK; /* size is always page size */
498 tmp_v_addr = pci_alloc_consistent(nic->pdev, size, 508 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
499 &tmp_p_addr); 509 &tmp_p_addr);
500 if (tmp_v_addr == NULL) { 510 if (tmp_v_addr == NULL) {
@@ -504,11 +514,24 @@ static int init_shared_mem(struct s2io_nic *nic)
504 * memory that was allocated till the 514 * memory that was allocated till the
505 * failure happened. 515 * failure happened.
506 */ 516 */
507 mac_control->rings[i].rx_blocks[j].block_virt_addr = 517 rx_blocks->block_virt_addr = tmp_v_addr;
508 tmp_v_addr;
509 return -ENOMEM; 518 return -ENOMEM;
510 } 519 }
511 memset(tmp_v_addr, 0, size); 520 memset(tmp_v_addr, 0, size);
521 rx_blocks->block_virt_addr = tmp_v_addr;
522 rx_blocks->block_dma_addr = tmp_p_addr;
523 rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
524 rxd_count[nic->rxd_mode],
525 GFP_KERNEL);
526 for (l = 0; l < rxd_count[nic->rxd_mode]; l++) {
527 rx_blocks->rxds[l].virt_addr =
528 rx_blocks->block_virt_addr +
529 (rxd_size[nic->rxd_mode] * l);
530 rx_blocks->rxds[l].dma_addr =
531 rx_blocks->block_dma_addr +
532 (rxd_size[nic->rxd_mode] * l);
533 }
534
512 mac_control->rings[i].rx_blocks[j].block_virt_addr = 535 mac_control->rings[i].rx_blocks[j].block_virt_addr =
513 tmp_v_addr; 536 tmp_v_addr;
514 mac_control->rings[i].rx_blocks[j].block_dma_addr = 537 mac_control->rings[i].rx_blocks[j].block_dma_addr =
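
The rxds[] table built in this hunk replaces pointer arithmetic on a fixed-size RxD_t with a per-mode stride. A standalone C sketch of the same address calculation; the size and count values are inferred from the RxD1_t/RxD3_t layouts in the s2io.h half of this patch, not read from the driver's real tables:

#include <stdio.h>

static const int rxd_size[3]  = { 32, 48, 48 };   /* RxD1_t, RxD3_t, RxD3_t */
static const int rxd_count[3] = { 127, 85, 85 };  /* descriptors per block  */

int main(void)
{
        unsigned long block_virt = 0x10000;   /* stand-in block address */
        int mode = 2;                         /* RXD_MODE_3B            */
        int l;

        for (l = 0; l < rxd_count[mode]; l++)
                printf("rxd[%3d].virt_addr = 0x%lx\n", l,
                       block_virt + (unsigned long)(rxd_size[mode] * l));
        return 0;
}
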
@@ -528,62 +551,58 @@ static int init_shared_mem(struct s2io_nic *nic)
528 blk_cnt].block_dma_addr; 551 blk_cnt].block_dma_addr;
529 552
530 pre_rxd_blk = (RxD_block_t *) tmp_v_addr; 553 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
531 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
532 * marker.
533 */
534#ifndef CONFIG_2BUFF_MODE
535 pre_rxd_blk->reserved_2_pNext_RxD_block = 554 pre_rxd_blk->reserved_2_pNext_RxD_block =
536 (unsigned long) tmp_v_addr_next; 555 (unsigned long) tmp_v_addr_next;
537#endif
538 pre_rxd_blk->pNext_RxD_Blk_physical = 556 pre_rxd_blk->pNext_RxD_Blk_physical =
539 (u64) tmp_p_addr_next; 557 (u64) tmp_p_addr_next;
540 } 558 }
541 } 559 }
542 560 if (nic->rxd_mode >= RXD_MODE_3A) {
543#ifdef CONFIG_2BUFF_MODE 561 /*
544 /* 562 * Allocation of Storages for buffer addresses in 2BUFF mode
545 * Allocation of Storages for buffer addresses in 2BUFF mode 563 * and the buffers as well.
546 * and the buffers as well. 564 */
547 */ 565 for (i = 0; i < config->rx_ring_num; i++) {
548 for (i = 0; i < config->rx_ring_num; i++) { 566 blk_cnt = config->rx_cfg[i].num_rxd /
549 blk_cnt = 567 (rxd_count[nic->rxd_mode]+ 1);
550 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 568 mac_control->rings[i].ba =
551 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt), 569 kmalloc((sizeof(buffAdd_t *) * blk_cnt),
552 GFP_KERNEL); 570 GFP_KERNEL);
553 if (!mac_control->rings[i].ba) 571 if (!mac_control->rings[i].ba)
554 return -ENOMEM;
555 for (j = 0; j < blk_cnt; j++) {
556 int k = 0;
557 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
558 (MAX_RXDS_PER_BLOCK + 1)),
559 GFP_KERNEL);
560 if (!mac_control->rings[i].ba[j])
561 return -ENOMEM; 572 return -ENOMEM;
562 while (k != MAX_RXDS_PER_BLOCK) { 573 for (j = 0; j < blk_cnt; j++) {
563 ba = &mac_control->rings[i].ba[j][k]; 574 int k = 0;
564 575 mac_control->rings[i].ba[j] =
565 ba->ba_0_org = (void *) kmalloc 576 kmalloc((sizeof(buffAdd_t) *
566 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); 577 (rxd_count[nic->rxd_mode] + 1)),
567 if (!ba->ba_0_org) 578 GFP_KERNEL);
568 return -ENOMEM; 579 if (!mac_control->rings[i].ba[j])
569 tmp = (unsigned long) ba->ba_0_org;
570 tmp += ALIGN_SIZE;
571 tmp &= ~((unsigned long) ALIGN_SIZE);
572 ba->ba_0 = (void *) tmp;
573
574 ba->ba_1_org = (void *) kmalloc
575 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
576 if (!ba->ba_1_org)
577 return -ENOMEM; 580 return -ENOMEM;
578 tmp = (unsigned long) ba->ba_1_org; 581 while (k != rxd_count[nic->rxd_mode]) {
579 tmp += ALIGN_SIZE; 582 ba = &mac_control->rings[i].ba[j][k];
580 tmp &= ~((unsigned long) ALIGN_SIZE); 583
581 ba->ba_1 = (void *) tmp; 584 ba->ba_0_org = (void *) kmalloc
582 k++; 585 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
586 if (!ba->ba_0_org)
587 return -ENOMEM;
588 tmp = (unsigned long)ba->ba_0_org;
589 tmp += ALIGN_SIZE;
590 tmp &= ~((unsigned long) ALIGN_SIZE);
591 ba->ba_0 = (void *) tmp;
592
593 ba->ba_1_org = (void *) kmalloc
594 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
595 if (!ba->ba_1_org)
596 return -ENOMEM;
597 tmp = (unsigned long) ba->ba_1_org;
598 tmp += ALIGN_SIZE;
599 tmp &= ~((unsigned long) ALIGN_SIZE);
600 ba->ba_1 = (void *) tmp;
601 k++;
602 }
583 } 603 }
584 } 604 }
585 } 605 }
586#endif
587 606
588 /* Allocation and initialization of Statistics block */ 607 /* Allocation and initialization of Statistics block */
589 size = sizeof(StatInfo_t); 608 size = sizeof(StatInfo_t);
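
The ba_0/ba_1 setup in this hunk uses the round-up-then-mask idiom, which only yields aligned addresses when ALIGN_SIZE is one less than a power of two. A self-contained sketch, assuming the driver's apparent value of 127 (128-byte alignment):

#include <assert.h>
#include <stdlib.h>

#define ALIGN_SIZE 127UL        /* assumed: 2^n - 1 */

int main(void)
{
        void *org = malloc(40 + ALIGN_SIZE);   /* BUF0_LEN + slack */
        unsigned long tmp;

        if (!org)
                return 1;
        tmp = (unsigned long)org;
        tmp += ALIGN_SIZE;                     /* step past the boundary */
        tmp &= ~ALIGN_SIZE;                    /* mask back onto it      */
        assert(tmp % (ALIGN_SIZE + 1) == 0);   /* now 128-byte aligned   */

        free(org);           /* free the original, as the driver kfrees
                              * ba_0_org rather than the aligned ba_0   */
        return 0;
}
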
@@ -669,11 +688,7 @@ static void free_shared_mem(struct s2io_nic *nic)
669 kfree(mac_control->fifos[i].list_info); 688 kfree(mac_control->fifos[i].list_info);
670 } 689 }
671 690
672#ifndef CONFIG_2BUFF_MODE
673 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
674#else
675 size = SIZE_OF_BLOCK; 691 size = SIZE_OF_BLOCK;
676#endif
677 for (i = 0; i < config->rx_ring_num; i++) { 692 for (i = 0; i < config->rx_ring_num; i++) {
678 blk_cnt = mac_control->rings[i].block_count; 693 blk_cnt = mac_control->rings[i].block_count;
679 for (j = 0; j < blk_cnt; j++) { 694 for (j = 0; j < blk_cnt; j++) {
@@ -685,30 +700,31 @@ static void free_shared_mem(struct s2io_nic *nic)
685 break; 700 break;
686 pci_free_consistent(nic->pdev, size, 701 pci_free_consistent(nic->pdev, size,
687 tmp_v_addr, tmp_p_addr); 702 tmp_v_addr, tmp_p_addr);
703 kfree(mac_control->rings[i].rx_blocks[j].rxds);
688 } 704 }
689 } 705 }
690 706
691#ifdef CONFIG_2BUFF_MODE 707 if (nic->rxd_mode >= RXD_MODE_3A) {
692 /* Freeing buffer storage addresses in 2BUFF mode. */ 708 /* Freeing buffer storage addresses in 2BUFF mode. */
693 for (i = 0; i < config->rx_ring_num; i++) { 709 for (i = 0; i < config->rx_ring_num; i++) {
694 blk_cnt = 710 blk_cnt = config->rx_cfg[i].num_rxd /
695 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 711 (rxd_count[nic->rxd_mode] + 1);
696 for (j = 0; j < blk_cnt; j++) { 712 for (j = 0; j < blk_cnt; j++) {
697 int k = 0; 713 int k = 0;
698 if (!mac_control->rings[i].ba[j]) 714 if (!mac_control->rings[i].ba[j])
699 continue; 715 continue;
700 while (k != MAX_RXDS_PER_BLOCK) { 716 while (k != rxd_count[nic->rxd_mode]) {
701 buffAdd_t *ba = &mac_control->rings[i].ba[j][k]; 717 buffAdd_t *ba =
702 kfree(ba->ba_0_org); 718 &mac_control->rings[i].ba[j][k];
703 kfree(ba->ba_1_org); 719 kfree(ba->ba_0_org);
704 k++; 720 kfree(ba->ba_1_org);
721 k++;
722 }
723 kfree(mac_control->rings[i].ba[j]);
705 } 724 }
706 kfree(mac_control->rings[i].ba[j]);
707 }
708 if (mac_control->rings[i].ba)
709 kfree(mac_control->rings[i].ba); 725 kfree(mac_control->rings[i].ba);
726 }
710 } 727 }
711#endif
712 728
713 if (mac_control->stats_mem) { 729 if (mac_control->stats_mem) {
714 pci_free_consistent(nic->pdev, 730 pci_free_consistent(nic->pdev,
@@ -1895,20 +1911,19 @@ static int start_nic(struct s2io_nic *nic)
1895 val64 = readq(&bar0->prc_ctrl_n[i]); 1911 val64 = readq(&bar0->prc_ctrl_n[i]);
1896 if (nic->config.bimodal) 1912 if (nic->config.bimodal)
1897 val64 |= PRC_CTRL_BIMODAL_INTERRUPT; 1913 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1898#ifndef CONFIG_2BUFF_MODE 1914 if (nic->rxd_mode == RXD_MODE_1)
1899 val64 |= PRC_CTRL_RC_ENABLED; 1915 val64 |= PRC_CTRL_RC_ENABLED;
1900#else 1916 else
1901 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3; 1917 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1902#endif
1903 writeq(val64, &bar0->prc_ctrl_n[i]); 1918 writeq(val64, &bar0->prc_ctrl_n[i]);
1904 } 1919 }
1905 1920
1906#ifdef CONFIG_2BUFF_MODE 1921 if (nic->rxd_mode == RXD_MODE_3B) {
1907 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */ 1922 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1908 val64 = readq(&bar0->rx_pa_cfg); 1923 val64 = readq(&bar0->rx_pa_cfg);
1909 val64 |= RX_PA_CFG_IGNORE_L2_ERR; 1924 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1910 writeq(val64, &bar0->rx_pa_cfg); 1925 writeq(val64, &bar0->rx_pa_cfg);
1911#endif 1926 }
1912 1927
1913 /* 1928 /*
1914 * Enabling MC-RLDRAM. After enabling the device, we timeout 1929 * Enabling MC-RLDRAM. After enabling the device, we timeout
@@ -2091,6 +2106,41 @@ static void stop_nic(struct s2io_nic *nic)
2091 } 2106 }
2092} 2107}
2093 2108
2109int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2110{
2111 struct net_device *dev = nic->dev;
2112 struct sk_buff *frag_list;
2113 u64 tmp;
2114
2115 /* Buffer-1 receives L3/L4 headers */
2116 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2117 (nic->pdev, skb->data, l3l4hdr_size + 4,
2118 PCI_DMA_FROMDEVICE);
2119
2120 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2121 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2122 if (skb_shinfo(skb)->frag_list == NULL) {
2123 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n", dev->name);
2124 return -ENOMEM;
2125 }
2126 frag_list = skb_shinfo(skb)->frag_list;
2127 frag_list->next = NULL;
2128 tmp = (u64) frag_list->data;
2129 tmp += ALIGN_SIZE;
2130 tmp &= ~ALIGN_SIZE;
2131 frag_list->data = (void *) tmp;
2132 frag_list->tail = (void *) tmp;
2133
2134 /* Buffer-2 receives L4 data payload */
2135 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2136 frag_list->data, dev->mtu,
2137 PCI_DMA_FROMDEVICE);
2138 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2139 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2140
2141 return SUCCESS;
2142}
2143
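fill_rxd_3buf() packs both buffer lengths into fixed fields of Control_2 via SET_BUFFER1_SIZE_3()/SET_BUFFER2_SIZE_3(). A userspace sketch of that packing; the vBIT() definition is an assumption modelled on the driver's MSB-relative bit macros:

#include <stdio.h>
#include <stdint.h>

/* assumed: field starts 'loc' bits from the MSB and is 'sz' bits wide */
#define vBIT(val, loc, sz)     ((uint64_t)(val) << (64 - (loc) - (sz)))
#define SET_BUFFER1_SIZE_3(v)  vBIT(v, 16, 16)
#define SET_BUFFER2_SIZE_3(v)  vBIT(v, 32, 16)

int main(void)
{
        uint64_t ctrl2 = 0;

        ctrl2 |= SET_BUFFER1_SIZE_3(128 + 4);  /* l3l4hdr_size + 4 */
        ctrl2 |= SET_BUFFER2_SIZE_3(1500);     /* dev->mtu         */
        printf("Control_2 = 0x%016llx\n", (unsigned long long)ctrl2);
        return 0;
}
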
2094/** 2144/**
2095 * fill_rx_buffers - Allocates the Rx side skbs 2145 * fill_rx_buffers - Allocates the Rx side skbs
2096 * @nic: device private variable 2146 * @nic: device private variable
@@ -2118,18 +2168,12 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2118 struct sk_buff *skb; 2168 struct sk_buff *skb;
2119 RxD_t *rxdp; 2169 RxD_t *rxdp;
2120 int off, off1, size, block_no, block_no1; 2170 int off, off1, size, block_no, block_no1;
2121 int offset, offset1;
2122 u32 alloc_tab = 0; 2171 u32 alloc_tab = 0;
2123 u32 alloc_cnt; 2172 u32 alloc_cnt;
2124 mac_info_t *mac_control; 2173 mac_info_t *mac_control;
2125 struct config_param *config; 2174 struct config_param *config;
2126#ifdef CONFIG_2BUFF_MODE
2127 RxD_t *rxdpnext;
2128 int nextblk;
2129 u64 tmp; 2175 u64 tmp;
2130 buffAdd_t *ba; 2176 buffAdd_t *ba;
2131 dma_addr_t rxdpphys;
2132#endif
2133#ifndef CONFIG_S2IO_NAPI 2177#ifndef CONFIG_S2IO_NAPI
2134 unsigned long flags; 2178 unsigned long flags;
2135#endif 2179#endif
@@ -2139,8 +2183,6 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2139 config = &nic->config; 2183 config = &nic->config;
2140 alloc_cnt = mac_control->rings[ring_no].pkt_cnt - 2184 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2141 atomic_read(&nic->rx_bufs_left[ring_no]); 2185 atomic_read(&nic->rx_bufs_left[ring_no]);
2142 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2143 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2144 2186
2145 while (alloc_tab < alloc_cnt) { 2187 while (alloc_tab < alloc_cnt) {
2146 block_no = mac_control->rings[ring_no].rx_curr_put_info. 2188 block_no = mac_control->rings[ring_no].rx_curr_put_info.
@@ -2149,159 +2191,145 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2149 block_index; 2191 block_index;
2150 off = mac_control->rings[ring_no].rx_curr_put_info.offset; 2192 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2151 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; 2193 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2152#ifndef CONFIG_2BUFF_MODE
2153 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2154 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2155#else
2156 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2157 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2158#endif
2159 2194
2160 rxdp = mac_control->rings[ring_no].rx_blocks[block_no]. 2195 rxdp = mac_control->rings[ring_no].
2161 block_virt_addr + off; 2196 rx_blocks[block_no].rxds[off].virt_addr;
2162 if ((offset == offset1) && (rxdp->Host_Control)) { 2197
2163 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name); 2198 if ((block_no == block_no1) && (off == off1) &&
2199 (rxdp->Host_Control)) {
2200 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2201 dev->name);
2164 DBG_PRINT(INTR_DBG, " info equated\n"); 2202 DBG_PRINT(INTR_DBG, " info equated\n");
2165 goto end; 2203 goto end;
2166 } 2204 }
2167#ifndef CONFIG_2BUFF_MODE 2205 if (off && (off == rxd_count[nic->rxd_mode])) {
2168 if (rxdp->Control_1 == END_OF_BLOCK) {
2169 mac_control->rings[ring_no].rx_curr_put_info. 2206 mac_control->rings[ring_no].rx_curr_put_info.
2170 block_index++; 2207 block_index++;
2208 if (mac_control->rings[ring_no].rx_curr_put_info.
2209 block_index == mac_control->rings[ring_no].
2210 block_count)
2211 mac_control->rings[ring_no].rx_curr_put_info.
2212 block_index = 0;
2213 block_no = mac_control->rings[ring_no].
2214 rx_curr_put_info.block_index;
2215 if (off == rxd_count[nic->rxd_mode])
2216 off = 0;
2171 mac_control->rings[ring_no].rx_curr_put_info. 2217 mac_control->rings[ring_no].rx_curr_put_info.
2172 block_index %= mac_control->rings[ring_no].block_count; 2218 offset = off;
2173 block_no = mac_control->rings[ring_no].rx_curr_put_info. 2219 rxdp = mac_control->rings[ring_no].
2174 block_index; 2220 rx_blocks[block_no].block_virt_addr;
2175 off++;
2176 off %= (MAX_RXDS_PER_BLOCK + 1);
2177 mac_control->rings[ring_no].rx_curr_put_info.offset =
2178 off;
2179 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2180 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2221 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2181 dev->name, rxdp); 2222 dev->name, rxdp);
2182 } 2223 }
2183#ifndef CONFIG_S2IO_NAPI 2224#ifndef CONFIG_S2IO_NAPI
2184 spin_lock_irqsave(&nic->put_lock, flags); 2225 spin_lock_irqsave(&nic->put_lock, flags);
2185 mac_control->rings[ring_no].put_pos = 2226 mac_control->rings[ring_no].put_pos =
2186 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off; 2227 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2187 spin_unlock_irqrestore(&nic->put_lock, flags); 2228 spin_unlock_irqrestore(&nic->put_lock, flags);
2188#endif 2229#endif
2189#else 2230 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2190 if (rxdp->Host_Control == END_OF_BLOCK) { 2231 ((nic->rxd_mode >= RXD_MODE_3A) &&
2191 mac_control->rings[ring_no].rx_curr_put_info. 2232 (rxdp->Control_2 & BIT(0)))) {
2192 block_index++;
2193 mac_control->rings[ring_no].rx_curr_put_info.block_index
2194 %= mac_control->rings[ring_no].block_count;
2195 block_no = mac_control->rings[ring_no].rx_curr_put_info
2196 .block_index;
2197 off = 0;
2198 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2199 dev->name, block_no,
2200 (unsigned long long) rxdp->Control_1);
2201 mac_control->rings[ring_no].rx_curr_put_info.offset =
2202 off;
2203 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2204 block_virt_addr;
2205 }
2206#ifndef CONFIG_S2IO_NAPI
2207 spin_lock_irqsave(&nic->put_lock, flags);
2208 mac_control->rings[ring_no].put_pos = (block_no *
2209 (MAX_RXDS_PER_BLOCK + 1)) + off;
2210 spin_unlock_irqrestore(&nic->put_lock, flags);
2211#endif
2212#endif
2213
2214#ifndef CONFIG_2BUFF_MODE
2215 if (rxdp->Control_1 & RXD_OWN_XENA)
2216#else
2217 if (rxdp->Control_2 & BIT(0))
2218#endif
2219 {
2220 mac_control->rings[ring_no].rx_curr_put_info. 2233 mac_control->rings[ring_no].rx_curr_put_info.
2221 offset = off; 2234 offset = off;
2222 goto end; 2235 goto end;
2223 } 2236 }
2224#ifdef CONFIG_2BUFF_MODE 2237 /* calculate size of skb based on ring mode */
2225 /* 2238 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2226 * RxDs Spanning cache lines will be replenished only 2239 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2227 * if the succeeding RxD is also owned by Host. It 2240 if (nic->rxd_mode == RXD_MODE_1)
2228 * will always be the ((8*i)+3) and ((8*i)+6) 2241 size += NET_IP_ALIGN;
2229 * descriptors for the 48 byte descriptor. The offending 2242 else if (nic->rxd_mode == RXD_MODE_3B)
2230 * descriptor is of course the 3rd descriptor. 2243 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2231 */ 2244 else
2232 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no]. 2245 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2233 block_dma_addr + (off * sizeof(RxD_t));
2234 if (((u64) (rxdpphys)) % 128 > 80) {
2235 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2236 block_virt_addr + (off + 1);
2237 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2238 nextblk = (block_no + 1) %
2239 (mac_control->rings[ring_no].block_count);
2240 rxdpnext = mac_control->rings[ring_no].rx_blocks
2241 [nextblk].block_virt_addr;
2242 }
2243 if (rxdpnext->Control_2 & BIT(0))
2244 goto end;
2245 }
2246#endif
2247 2246
2248#ifndef CONFIG_2BUFF_MODE 2247 /* allocate skb */
2249 skb = dev_alloc_skb(size + NET_IP_ALIGN); 2248 skb = dev_alloc_skb(size);
2250#else 2249 if(!skb) {
2251 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2252#endif
2253 if (!skb) {
2254 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); 2250 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2255 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); 2251 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2256 if (first_rxdp) { 2252 if (first_rxdp) {
2257 wmb(); 2253 wmb();
2258 first_rxdp->Control_1 |= RXD_OWN_XENA; 2254 first_rxdp->Control_1 |= RXD_OWN_XENA;
2259 } 2255 }
2260 return -ENOMEM; 2256 return -ENOMEM;
2257 }
2258 if (nic->rxd_mode == RXD_MODE_1) {
2259 /* 1 buffer mode - normal operation mode */
2260 memset(rxdp, 0, sizeof(RxD1_t));
2261 skb_reserve(skb, NET_IP_ALIGN);
2262 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2263 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2264 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
2265 rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
2266
2267 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2268 /*
2269 * 2 or 3 buffer mode -
2270 * Both 2 buffer mode and 3 buffer mode provides 128
2271 * byte aligned receive buffers.
2272 *
2273 * 3 buffer mode provides header separation, wherein
2274 * skb->data will have L3/L4 headers, whereas
2275 * skb_shinfo(skb)->frag_list will have the L4 data
2276 * payload
2277 */
2278
2279 memset(rxdp, 0, sizeof(RxD3_t));
2280 ba = &mac_control->rings[ring_no].ba[block_no][off];
2281 skb_reserve(skb, BUF0_LEN);
2282 tmp = (u64)(unsigned long) skb->data;
2283 tmp += ALIGN_SIZE;
2284 tmp &= ~ALIGN_SIZE;
2285 skb->data = (void *) (unsigned long)tmp;
2286 skb->tail = (void *) (unsigned long)tmp;
2287
2288 ((RxD3_t*)rxdp)->Buffer0_ptr =
2289 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2290 PCI_DMA_FROMDEVICE);
2291 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2292 if (nic->rxd_mode == RXD_MODE_3B) {
2293 /* Two buffer mode */
2294
2295 /*
2296 * Buffer2 will have L3/L4 header plus
2297 * L4 payload
2298 */
2299 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2300 (nic->pdev, skb->data, dev->mtu + 4,
2301 PCI_DMA_FROMDEVICE);
2302
2303 /* Buffer-1 will be dummy buffer not used */
2304 ((RxD3_t*)rxdp)->Buffer1_ptr =
2305 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2306 PCI_DMA_FROMDEVICE);
2307 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2308 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2309 (dev->mtu + 4);
2310 } else {
2311 /* 3 buffer mode */
2312 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2313 dev_kfree_skb_irq(skb);
2314 if (first_rxdp) {
2315 wmb();
2316 first_rxdp->Control_1 |=
2317 RXD_OWN_XENA;
2318 }
2319 return -ENOMEM;
2320 }
2321 }
2322 rxdp->Control_2 |= BIT(0);
2261 } 2323 }
2262#ifndef CONFIG_2BUFF_MODE
2263 skb_reserve(skb, NET_IP_ALIGN);
2264 memset(rxdp, 0, sizeof(RxD_t));
2265 rxdp->Buffer0_ptr = pci_map_single
2266 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2267 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2268 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2269 rxdp->Host_Control = (unsigned long) (skb); 2324 rxdp->Host_Control = (unsigned long) (skb);
2270 if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2325 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2271 rxdp->Control_1 |= RXD_OWN_XENA; 2326 rxdp->Control_1 |= RXD_OWN_XENA;
2272 off++; 2327 off++;
2273 off %= (MAX_RXDS_PER_BLOCK + 1); 2328 if (off == (rxd_count[nic->rxd_mode] + 1))
2274 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2329 off = 0;
2275#else
2276 ba = &mac_control->rings[ring_no].ba[block_no][off];
2277 skb_reserve(skb, BUF0_LEN);
2278 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2279 if (tmp)
2280 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2281
2282 memset(rxdp, 0, sizeof(RxD_t));
2283 rxdp->Buffer2_ptr = pci_map_single
2284 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2285 PCI_DMA_FROMDEVICE);
2286 rxdp->Buffer0_ptr =
2287 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2288 PCI_DMA_FROMDEVICE);
2289 rxdp->Buffer1_ptr =
2290 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2291 PCI_DMA_FROMDEVICE);
2292
2293 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2294 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2295 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2296 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2297 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2298 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2299 rxdp->Control_1 |= RXD_OWN_XENA;
2300 off++;
2301 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2330 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2302#endif
2303 rxdp->Control_2 |= SET_RXD_MARKER;
2304 2331
2332 rxdp->Control_2 |= SET_RXD_MARKER;
2305 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { 2333 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2306 if (first_rxdp) { 2334 if (first_rxdp) {
2307 wmb(); 2335 wmb();
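
The per-mode skb sizing in this hunk is compact enough to restate on its own. In this sketch the Ethernet header sum and the buffer constants are assumptions standing in for the driver's HEADER_*, ALIGN_SIZE and BUF0_LEN values:

#include <stdio.h>

enum { RXD_MODE_1 = 0, RXD_MODE_3A = 1, RXD_MODE_3B = 2 }; /* from s2io.h */
enum { ETH_HDRS = 22, NET_IP_ALIGN = 2,        /* assumed header sums */
       ALIGN_SIZE = 127, BUF0_LEN = 40, L3L4HDR_SIZE = 128 };

static int rx_skb_size(int mode, int mtu)
{
        if (mode == RXD_MODE_1)         /* whole frame in one buffer   */
                return mtu + ETH_HDRS + NET_IP_ALIGN;
        if (mode == RXD_MODE_3B)        /* headers + payload in skb    */
                return mtu + ALIGN_SIZE + BUF0_LEN + 4;
        /* RXD_MODE_3A: skb holds headers only, payload via frag_list */
        return L3L4HDR_SIZE + ALIGN_SIZE + BUF0_LEN + 4;
}

int main(void)
{
        printf("mode1=%d 3B=%d 3A=%d\n",
               rx_skb_size(RXD_MODE_1, 1500),
               rx_skb_size(RXD_MODE_3B, 1500),
               rx_skb_size(RXD_MODE_3A, 1500));
        return 0;
}
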
@@ -2326,6 +2354,67 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2326 return SUCCESS; 2354 return SUCCESS;
2327} 2355}
2328 2356
2357static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2358{
2359 struct net_device *dev = sp->dev;
2360 int j;
2361 struct sk_buff *skb;
2362 RxD_t *rxdp;
2363 mac_info_t *mac_control;
2364 buffAdd_t *ba;
2365
2366 mac_control = &sp->mac_control;
2367 for (j = 0; j < rxd_count[sp->rxd_mode]; j++) {
2368 rxdp = mac_control->rings[ring_no].
2369 rx_blocks[blk].rxds[j].virt_addr;
2370 skb = (struct sk_buff *)
2371 ((unsigned long) rxdp->Host_Control);
2372 if (!skb) {
2373 continue;
2374 }
2375 if (sp->rxd_mode == RXD_MODE_1) {
2376 pci_unmap_single(sp->pdev, (dma_addr_t)
2377 ((RxD1_t*)rxdp)->Buffer0_ptr,
2378 dev->mtu +
2379 HEADER_ETHERNET_II_802_3_SIZE
2380 + HEADER_802_2_SIZE +
2381 HEADER_SNAP_SIZE,
2382 PCI_DMA_FROMDEVICE);
2383 memset(rxdp, 0, sizeof(RxD1_t));
2384 } else if (sp->rxd_mode == RXD_MODE_3B) {
2385 ba = &mac_control->rings[ring_no].
2386 ba[blk][j];
2387 pci_unmap_single(sp->pdev, (dma_addr_t)
2388 ((RxD3_t*)rxdp)->Buffer0_ptr,
2389 BUF0_LEN,
2390 PCI_DMA_FROMDEVICE);
2391 pci_unmap_single(sp->pdev, (dma_addr_t)
2392 ((RxD3_t*)rxdp)->Buffer1_ptr,
2393 BUF1_LEN,
2394 PCI_DMA_FROMDEVICE);
2395 pci_unmap_single(sp->pdev, (dma_addr_t)
2396 ((RxD3_t*)rxdp)->Buffer2_ptr,
2397 dev->mtu + 4,
2398 PCI_DMA_FROMDEVICE);
2399 memset(rxdp, 0, sizeof(RxD3_t));
2400 } else {
2401 pci_unmap_single(sp->pdev, (dma_addr_t)
2402 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2403 PCI_DMA_FROMDEVICE);
2404 pci_unmap_single(sp->pdev, (dma_addr_t)
2405 ((RxD3_t*)rxdp)->Buffer1_ptr,
2406 l3l4hdr_size + 4,
2407 PCI_DMA_FROMDEVICE);
2408 pci_unmap_single(sp->pdev, (dma_addr_t)
2409 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2410 PCI_DMA_FROMDEVICE);
2411 memset(rxdp, 0, sizeof(RxD3_t));
2412 }
2413 dev_kfree_skb(skb);
2414 atomic_dec(&sp->rx_bufs_left[ring_no]);
2415 }
2416}
2417
2329/** 2418/**
2330 * free_rx_buffers - Frees all Rx buffers 2419 * free_rx_buffers - Frees all Rx buffers
2331 * @sp: device private variable. 2420 * @sp: device private variable.
@@ -2338,77 +2427,17 @@ int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2338static void free_rx_buffers(struct s2io_nic *sp) 2427static void free_rx_buffers(struct s2io_nic *sp)
2339{ 2428{
2340 struct net_device *dev = sp->dev; 2429 struct net_device *dev = sp->dev;
2341 int i, j, blk = 0, off, buf_cnt = 0; 2430 int i, blk = 0, buf_cnt = 0;
2342 RxD_t *rxdp;
2343 struct sk_buff *skb;
2344 mac_info_t *mac_control; 2431 mac_info_t *mac_control;
2345 struct config_param *config; 2432 struct config_param *config;
2346#ifdef CONFIG_2BUFF_MODE
2347 buffAdd_t *ba;
2348#endif
2349 2433
2350 mac_control = &sp->mac_control; 2434 mac_control = &sp->mac_control;
2351 config = &sp->config; 2435 config = &sp->config;
2352 2436
2353 for (i = 0; i < config->rx_ring_num; i++) { 2437 for (i = 0; i < config->rx_ring_num; i++) {
2354 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) { 2438 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2355 off = j % (MAX_RXDS_PER_BLOCK + 1); 2439 free_rxd_blk(sp,i,blk);
2356 rxdp = mac_control->rings[i].rx_blocks[blk].
2357 block_virt_addr + off;
2358
2359#ifndef CONFIG_2BUFF_MODE
2360 if (rxdp->Control_1 == END_OF_BLOCK) {
2361 rxdp =
2362 (RxD_t *) ((unsigned long) rxdp->
2363 Control_2);
2364 j++;
2365 blk++;
2366 }
2367#else
2368 if (rxdp->Host_Control == END_OF_BLOCK) {
2369 blk++;
2370 continue;
2371 }
2372#endif
2373
2374 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2375 memset(rxdp, 0, sizeof(RxD_t));
2376 continue;
2377 }
2378 2440
2379 skb =
2380 (struct sk_buff *) ((unsigned long) rxdp->
2381 Host_Control);
2382 if (skb) {
2383#ifndef CONFIG_2BUFF_MODE
2384 pci_unmap_single(sp->pdev, (dma_addr_t)
2385 rxdp->Buffer0_ptr,
2386 dev->mtu +
2387 HEADER_ETHERNET_II_802_3_SIZE
2388 + HEADER_802_2_SIZE +
2389 HEADER_SNAP_SIZE,
2390 PCI_DMA_FROMDEVICE);
2391#else
2392 ba = &mac_control->rings[i].ba[blk][off];
2393 pci_unmap_single(sp->pdev, (dma_addr_t)
2394 rxdp->Buffer0_ptr,
2395 BUF0_LEN,
2396 PCI_DMA_FROMDEVICE);
2397 pci_unmap_single(sp->pdev, (dma_addr_t)
2398 rxdp->Buffer1_ptr,
2399 BUF1_LEN,
2400 PCI_DMA_FROMDEVICE);
2401 pci_unmap_single(sp->pdev, (dma_addr_t)
2402 rxdp->Buffer2_ptr,
2403 dev->mtu + BUF0_LEN + 4,
2404 PCI_DMA_FROMDEVICE);
2405#endif
2406 dev_kfree_skb(skb);
2407 atomic_dec(&sp->rx_bufs_left[i]);
2408 buf_cnt++;
2409 }
2410 memset(rxdp, 0, sizeof(RxD_t));
2411 }
2412 mac_control->rings[i].rx_curr_put_info.block_index = 0; 2441 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2413 mac_control->rings[i].rx_curr_get_info.block_index = 0; 2442 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2414 mac_control->rings[i].rx_curr_put_info.offset = 0; 2443 mac_control->rings[i].rx_curr_put_info.offset = 0;
@@ -2514,7 +2543,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
2514{ 2543{
2515 nic_t *nic = ring_data->nic; 2544 nic_t *nic = ring_data->nic;
2516 struct net_device *dev = (struct net_device *) nic->dev; 2545 struct net_device *dev = (struct net_device *) nic->dev;
2517 int get_block, get_offset, put_block, put_offset, ring_bufs; 2546 int get_block, put_block, put_offset;
2518 rx_curr_get_info_t get_info, put_info; 2547 rx_curr_get_info_t get_info, put_info;
2519 RxD_t *rxdp; 2548 RxD_t *rxdp;
2520 struct sk_buff *skb; 2549 struct sk_buff *skb;
@@ -2533,21 +2562,22 @@ static void rx_intr_handler(ring_info_t *ring_data)
2533 get_block = get_info.block_index; 2562 get_block = get_info.block_index;
2534 put_info = ring_data->rx_curr_put_info; 2563 put_info = ring_data->rx_curr_put_info;
2535 put_block = put_info.block_index; 2564 put_block = put_info.block_index;
2536 ring_bufs = get_info.ring_len+1; 2565 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
2537 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2538 get_info.offset;
2539 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2540 get_info.offset;
2541#ifndef CONFIG_S2IO_NAPI 2566#ifndef CONFIG_S2IO_NAPI
2542 spin_lock(&nic->put_lock); 2567 spin_lock(&nic->put_lock);
2543 put_offset = ring_data->put_pos; 2568 put_offset = ring_data->put_pos;
2544 spin_unlock(&nic->put_lock); 2569 spin_unlock(&nic->put_lock);
2545#else 2570#else
2546 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) + 2571 put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) +
2547 put_info.offset; 2572 put_info.offset;
2548#endif 2573#endif
2549 while (RXD_IS_UP2DT(rxdp) && 2574 while (RXD_IS_UP2DT(rxdp)) {
2550 (((get_offset + 1) % ring_bufs) != put_offset)) { 2575 /* If we are next to the put index, the ring is full */
2576 if ((get_block == put_block) &&
2577 (get_info.offset + 1) == put_info.offset) {
2578 DBG_PRINT(ERR_DBG, "%s: Ring Full\n", dev->name);
2579 break;
2580 }
2551 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2581 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2552 if (skb == NULL) { 2582 if (skb == NULL) {
2553 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2583 DBG_PRINT(ERR_DBG, "%s: The skb is ",
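
The rewritten walk above drops the flat get_offset/put_offset arithmetic for an explicit (block, offset) pair, keeping one slot of headroom before the producer. A toy model of the traversal, with illustrative ring dimensions:

#include <stdio.h>

enum { BLOCKS = 4, RXDS_PER_BLOCK = 85 };

int main(void)
{
        int get_block = 0, get_off = 0;
        int put_block = 2, put_off = 10;  /* arbitrary producer slot */
        int processed = 0;

        while (!(get_block == put_block && get_off + 1 == put_off)) {
                processed++;
                if (++get_off == RXDS_PER_BLOCK) {  /* end of block */
                        get_off = 0;
                        if (++get_block == BLOCKS)  /* wrap the ring */
                                get_block = 0;
                }
        }
        printf("processed %d descriptors\n", processed);
        return 0;
}
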
@@ -2556,46 +2586,52 @@ static void rx_intr_handler(ring_info_t *ring_data)
2556 spin_unlock(&nic->rx_lock); 2586 spin_unlock(&nic->rx_lock);
2557 return; 2587 return;
2558 } 2588 }
2559#ifndef CONFIG_2BUFF_MODE 2589 if (nic->rxd_mode == RXD_MODE_1) {
2560 pci_unmap_single(nic->pdev, (dma_addr_t) 2590 pci_unmap_single(nic->pdev, (dma_addr_t)
2561 rxdp->Buffer0_ptr, 2591 ((RxD1_t*)rxdp)->Buffer0_ptr,
2562 dev->mtu + 2592 dev->mtu +
2563 HEADER_ETHERNET_II_802_3_SIZE + 2593 HEADER_ETHERNET_II_802_3_SIZE +
2564 HEADER_802_2_SIZE + 2594 HEADER_802_2_SIZE +
2565 HEADER_SNAP_SIZE, 2595 HEADER_SNAP_SIZE,
2566 PCI_DMA_FROMDEVICE); 2596 PCI_DMA_FROMDEVICE);
2567#else 2597 } else if (nic->rxd_mode == RXD_MODE_3B) {
2568 pci_unmap_single(nic->pdev, (dma_addr_t) 2598 pci_unmap_single(nic->pdev, (dma_addr_t)
2569 rxdp->Buffer0_ptr, 2599 ((RxD3_t*)rxdp)->Buffer0_ptr,
2570 BUF0_LEN, PCI_DMA_FROMDEVICE); 2600 BUF0_LEN, PCI_DMA_FROMDEVICE);
2571 pci_unmap_single(nic->pdev, (dma_addr_t) 2601 pci_unmap_single(nic->pdev, (dma_addr_t)
2572 rxdp->Buffer1_ptr, 2602 ((RxD3_t*)rxdp)->Buffer1_ptr,
2573 BUF1_LEN, PCI_DMA_FROMDEVICE); 2603 BUF1_LEN, PCI_DMA_FROMDEVICE);
2574 pci_unmap_single(nic->pdev, (dma_addr_t) 2604 pci_unmap_single(nic->pdev, (dma_addr_t)
2575 rxdp->Buffer2_ptr, 2605 ((RxD3_t*)rxdp)->Buffer2_ptr,
2576 dev->mtu + BUF0_LEN + 4, 2606 dev->mtu + 4,
2577 PCI_DMA_FROMDEVICE); 2607 PCI_DMA_FROMDEVICE);
2578#endif 2608 } else {
2609 pci_unmap_single(nic->pdev, (dma_addr_t)
2610 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2611 PCI_DMA_FROMDEVICE);
2612 pci_unmap_single(nic->pdev, (dma_addr_t)
2613 ((RxD3_t*)rxdp)->Buffer1_ptr,
2614 l3l4hdr_size + 4,
2615 PCI_DMA_FROMDEVICE);
2616 pci_unmap_single(nic->pdev, (dma_addr_t)
2617 ((RxD3_t*)rxdp)->Buffer2_ptr,
2618 dev->mtu, PCI_DMA_FROMDEVICE);
2619 }
2579 rx_osm_handler(ring_data, rxdp); 2620 rx_osm_handler(ring_data, rxdp);
2580 get_info.offset++; 2621 get_info.offset++;
2581 ring_data->rx_curr_get_info.offset = 2622 ring_data->rx_curr_get_info.offset = get_info.offset;
2582 get_info.offset; 2623 rxdp = ring_data->rx_blocks[get_block].
2583 rxdp = ring_data->rx_blocks[get_block].block_virt_addr + 2624 rxds[get_info.offset].virt_addr;
2584 get_info.offset; 2625 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2585 if (get_info.offset &&
2586 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2587 get_info.offset = 0; 2626 get_info.offset = 0;
2588 ring_data->rx_curr_get_info.offset 2627 ring_data->rx_curr_get_info.offset = get_info.offset;
2589 = get_info.offset;
2590 get_block++; 2628 get_block++;
2591 get_block %= ring_data->block_count; 2629 if (get_block == ring_data->block_count)
2592 ring_data->rx_curr_get_info.block_index 2630 get_block = 0;
2593 = get_block; 2631 ring_data->rx_curr_get_info.block_index = get_block;
2594 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 2632 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2595 } 2633 }
2596 2634
2597 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2598 get_info.offset;
2599#ifdef CONFIG_S2IO_NAPI 2635#ifdef CONFIG_S2IO_NAPI
2600 nic->pkts_to_process -= 1; 2636 nic->pkts_to_process -= 1;
2601 if (!nic->pkts_to_process) 2637 if (!nic->pkts_to_process)
@@ -5538,16 +5574,7 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5538 ((unsigned long) rxdp->Host_Control); 5574 ((unsigned long) rxdp->Host_Control);
5539 int ring_no = ring_data->ring_no; 5575 int ring_no = ring_data->ring_no;
5540 u16 l3_csum, l4_csum; 5576 u16 l3_csum, l4_csum;
5541#ifdef CONFIG_2BUFF_MODE 5577
5542 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
5543 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
5544 int get_block = ring_data->rx_curr_get_info.block_index;
5545 int get_off = ring_data->rx_curr_get_info.offset;
5546 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5547 unsigned char *buff;
5548#else
5549 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
5550#endif
5551 skb->dev = dev; 5578 skb->dev = dev;
5552 if (rxdp->Control_1 & RXD_T_CODE) { 5579 if (rxdp->Control_1 & RXD_T_CODE) {
5553 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; 5580 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
@@ -5564,19 +5591,36 @@ static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
5564 rxdp->Host_Control = 0; 5591 rxdp->Host_Control = 0;
5565 sp->rx_pkt_count++; 5592 sp->rx_pkt_count++;
5566 sp->stats.rx_packets++; 5593 sp->stats.rx_packets++;
5567#ifndef CONFIG_2BUFF_MODE 5594 if (sp->rxd_mode == RXD_MODE_1) {
5568 sp->stats.rx_bytes += len; 5595 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
5569#else
5570 sp->stats.rx_bytes += buf0_len + buf2_len;
5571#endif
5572 5596
5573#ifndef CONFIG_2BUFF_MODE 5597 sp->stats.rx_bytes += len;
5574 skb_put(skb, len); 5598 skb_put(skb, len);
5575#else 5599
5576 buff = skb_push(skb, buf0_len); 5600 } else if (sp->rxd_mode >= RXD_MODE_3A) {
5577 memcpy(buff, ba->ba_0, buf0_len); 5601 int get_block = ring_data->rx_curr_get_info.block_index;
5578 skb_put(skb, buf2_len); 5602 int get_off = ring_data->rx_curr_get_info.offset;
5579#endif 5603 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
5604 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
5605 unsigned char *buff = skb_push(skb, buf0_len);
5606
5607 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
5608 sp->stats.rx_bytes += buf0_len + buf2_len;
5609 memcpy(buff, ba->ba_0, buf0_len);
5610
5611 if (sp->rxd_mode == RXD_MODE_3A) {
5612 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
5613
5614 skb_put(skb, buf1_len);
5615 skb->len += buf2_len;
5616 skb->data_len += buf2_len;
5617 skb->truesize += buf2_len;
5618 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
5619 sp->stats.rx_bytes += buf1_len;
5620
5621 } else
5622 skb_put(skb, buf2_len);
5623 }
5580 5624
5581 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && 5625 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
5582 (sp->rx_csum)) { 5626 (sp->rx_csum)) {
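
The 3-buffer branch above grows a nonlinear skb: the linear area holds buf0+buf1 while the frag_list skb holds buf2, so len, data_len and truesize must all be updated together. Plain counters model the bookkeeping (lengths are illustrative):

#include <assert.h>

struct model_skb { int len, data_len, truesize; };

int main(void)
{
        struct model_skb skb = { 0, 0, 0 };
        int buf0_len = 40, buf1_len = 132, buf2_len = 1500;

        skb.len      += buf0_len + buf1_len;  /* linear part: headers   */
        skb.len      += buf2_len;             /* plus frag_list payload */
        skb.data_len += buf2_len;             /* nonlinear portion only */
        skb.truesize += buf2_len;

        /* linear length is always len - data_len */
        assert(skb.len - skb.data_len == buf0_len + buf1_len);
        return 0;
}
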
@@ -5712,6 +5756,7 @@ MODULE_VERSION(DRV_VERSION);
5712 5756
5713module_param(tx_fifo_num, int, 0); 5757module_param(tx_fifo_num, int, 0);
5714module_param(rx_ring_num, int, 0); 5758module_param(rx_ring_num, int, 0);
5759module_param(rx_ring_mode, int, 0);
5715module_param_array(tx_fifo_len, uint, NULL, 0); 5760module_param_array(tx_fifo_len, uint, NULL, 0);
5716module_param_array(rx_ring_sz, uint, NULL, 0); 5761module_param_array(rx_ring_sz, uint, NULL, 0);
5717module_param_array(rts_frm_len, uint, NULL, 0); 5762module_param_array(rts_frm_len, uint, NULL, 0);
@@ -5723,6 +5768,7 @@ module_param(shared_splits, int, 0);
5723module_param(tmac_util_period, int, 0); 5768module_param(tmac_util_period, int, 0);
5724module_param(rmac_util_period, int, 0); 5769module_param(rmac_util_period, int, 0);
5725module_param(bimodal, bool, 0); 5770module_param(bimodal, bool, 0);
5771module_param(l3l4hdr_size, int , 0);
5726#ifndef CONFIG_S2IO_NAPI 5772#ifndef CONFIG_S2IO_NAPI
5727module_param(indicate_max_pkts, int, 0); 5773module_param(indicate_max_pkts, int, 0);
5728#endif 5774#endif
@@ -5844,6 +5890,13 @@ Defaulting to INTA\n");
5844 sp->pdev = pdev; 5890 sp->pdev = pdev;
5845 sp->high_dma_flag = dma_flag; 5891 sp->high_dma_flag = dma_flag;
5846 sp->device_enabled_once = FALSE; 5892 sp->device_enabled_once = FALSE;
5893 if (rx_ring_mode == 1)
5894 sp->rxd_mode = RXD_MODE_1;
5895 if (rx_ring_mode == 2)
5896 sp->rxd_mode = RXD_MODE_3B;
5897 if (rx_ring_mode == 3)
5898 sp->rxd_mode = RXD_MODE_3A;
5899
5847 sp->intr_type = dev_intr_type; 5900 sp->intr_type = dev_intr_type;
5848 5901
5849 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || 5902 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
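
The rx_ring_mode mapping above uses sequential ifs; the same table recast as a switch, as a sketch (the default branch is an assumption — the driver simply leaves rxd_mode untouched for out-of-range values):

#include <stdio.h>

enum { RXD_MODE_1 = 0, RXD_MODE_3A = 1, RXD_MODE_3B = 2 }; /* from s2io.h */

static int rxd_mode_from_param(int rx_ring_mode)
{
        switch (rx_ring_mode) {
        case 2:  return RXD_MODE_3B;  /* two buffers   */
        case 3:  return RXD_MODE_3A;  /* three buffers */
        default: return RXD_MODE_1;   /* single buffer */
        }
}

int main(void)
{
        printf("%d %d %d\n", rxd_mode_from_param(1),
               rxd_mode_from_param(2), rxd_mode_from_param(3));
        return 0;
}
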
@@ -5896,7 +5949,7 @@ Defaulting to INTA\n");
5896 config->rx_ring_num = rx_ring_num; 5949 config->rx_ring_num = rx_ring_num;
5897 for (i = 0; i < MAX_RX_RINGS; i++) { 5950 for (i = 0; i < MAX_RX_RINGS; i++) {
5898 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 5951 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5899 (MAX_RXDS_PER_BLOCK + 1); 5952 (rxd_count[sp->rxd_mode] + 1);
5900 config->rx_cfg[i].ring_priority = i; 5953 config->rx_cfg[i].ring_priority = i;
5901 } 5954 }
5902 5955
@@ -6091,9 +6144,6 @@ Defaulting to INTA\n");
6091 DBG_PRINT(ERR_DBG, "(rev %d), Version %s", 6144 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6092 get_xena_rev_id(sp->pdev), 6145 get_xena_rev_id(sp->pdev),
6093 s2io_driver_version); 6146 s2io_driver_version);
6094#ifdef CONFIG_2BUFF_MODE
6095 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6096#endif
6097 switch(sp->intr_type) { 6147 switch(sp->intr_type) {
6098 case INTA: 6148 case INTA:
6099 DBG_PRINT(ERR_DBG, ", Intr type INTA"); 6149 DBG_PRINT(ERR_DBG, ", Intr type INTA");
@@ -6126,9 +6176,6 @@ Defaulting to INTA\n");
6126 DBG_PRINT(ERR_DBG, "(rev %d), Version %s", 6176 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
6127 get_xena_rev_id(sp->pdev), 6177 get_xena_rev_id(sp->pdev),
6128 s2io_driver_version); 6178 s2io_driver_version);
6129#ifdef CONFIG_2BUFF_MODE
6130 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
6131#endif
6132 switch(sp->intr_type) { 6179 switch(sp->intr_type) {
6133 case INTA: 6180 case INTA:
6134 DBG_PRINT(ERR_DBG, ", Intr type INTA"); 6181 DBG_PRINT(ERR_DBG, ", Intr type INTA");
@@ -6149,6 +6196,12 @@ Defaulting to INTA\n");
6149 sp->def_mac_addr[0].mac_addr[4], 6196 sp->def_mac_addr[0].mac_addr[4],
6150 sp->def_mac_addr[0].mac_addr[5]); 6197 sp->def_mac_addr[0].mac_addr[5]);
6151 } 6198 }
6199 if (sp->rxd_mode == RXD_MODE_3B)
6200 DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
6201 "enabled\n",dev->name);
6202 if (sp->rxd_mode == RXD_MODE_3A)
6203 DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
6204 "enabled\n",dev->name);
6152 6205
6153 /* Initialize device name */ 6206 /* Initialize device name */
6154 strcpy(sp->name, dev->name); 6207 strcpy(sp->name, dev->name);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 1cc24b56760e..419aad7f10e7 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -418,7 +418,7 @@ typedef struct list_info_hold {
418 void *list_virt_addr; 418 void *list_virt_addr;
419} list_info_hold_t; 419} list_info_hold_t;
420 420
421/* Rx descriptor structure */ 421/* Rx descriptor structure for 1 buffer mode */
422typedef struct _RxD_t { 422typedef struct _RxD_t {
423 u64 Host_Control; /* reserved for host */ 423 u64 Host_Control; /* reserved for host */
424 u64 Control_1; 424 u64 Control_1;
@@ -439,49 +439,54 @@ typedef struct _RxD_t {
439#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2) 439#define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2)
440#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62) 440#define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62)
441 441
442#ifndef CONFIG_2BUFF_MODE
443#define MASK_BUFFER0_SIZE vBIT(0x3FFF,2,14)
444#define SET_BUFFER0_SIZE(val) vBIT(val,2,14)
445#else
446#define MASK_BUFFER0_SIZE vBIT(0xFF,2,14)
447#define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
448#define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
449#define SET_BUFFER0_SIZE(val) vBIT(val,8,8)
450#define SET_BUFFER1_SIZE(val) vBIT(val,16,16)
451#define SET_BUFFER2_SIZE(val) vBIT(val,32,16)
452#endif
453
454#define MASK_VLAN_TAG vBIT(0xFFFF,48,16) 442#define MASK_VLAN_TAG vBIT(0xFFFF,48,16)
455#define SET_VLAN_TAG(val) vBIT(val,48,16) 443#define SET_VLAN_TAG(val) vBIT(val,48,16)
456#define SET_NUM_TAG(val) vBIT(val,16,32) 444#define SET_NUM_TAG(val) vBIT(val,16,32)
457 445
458#ifndef CONFIG_2BUFF_MODE 446
459#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14))) 447} RxD_t;
460#else 448/* Rx descriptor structure for 1 buffer mode */
461#define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \ 449typedef struct _RxD1_t {
462 >> 48) 450 struct _RxD_t h;
463#define RXD_GET_BUFFER1_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER1_SIZE) \ 451
464 >> 32) 452#define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14)
465#define RXD_GET_BUFFER2_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER2_SIZE) \ 453#define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14)
466 >> 16) 454#define RXD_GET_BUFFER0_SIZE_1(_Control_2) \
455 (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48)
456 u64 Buffer0_ptr;
457} RxD1_t;
458/* Rx descriptor structure for 3 or 2 buffer mode */
459
460typedef struct _RxD3_t {
461 struct _RxD_t h;
462
463#define MASK_BUFFER0_SIZE_3 vBIT(0xFF,2,14)
464#define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16)
465#define MASK_BUFFER2_SIZE_3 vBIT(0xFFFF,32,16)
466#define SET_BUFFER0_SIZE_3(val) vBIT(val,8,8)
467#define SET_BUFFER1_SIZE_3(val) vBIT(val,16,16)
468#define SET_BUFFER2_SIZE_3(val) vBIT(val,32,16)
469#define RXD_GET_BUFFER0_SIZE_3(Control_2) \
470 (u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48)
471#define RXD_GET_BUFFER1_SIZE_3(Control_2) \
472 (u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32)
473#define RXD_GET_BUFFER2_SIZE_3(Control_2) \
474 (u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16)
467#define BUF0_LEN 40 475#define BUF0_LEN 40
468#define BUF1_LEN 1 476#define BUF1_LEN 1
469#endif
470 477
471 u64 Buffer0_ptr; 478 u64 Buffer0_ptr;
472#ifdef CONFIG_2BUFF_MODE
473 u64 Buffer1_ptr; 479 u64 Buffer1_ptr;
474 u64 Buffer2_ptr; 480 u64 Buffer2_ptr;
475#endif 481} RxD3_t;
476} RxD_t; 482
477 483
478/* Structure that represents the Rx descriptor block which contains 484/* Structure that represents the Rx descriptor block which contains
479 * 128 Rx descriptors. 485 * 128 Rx descriptors.
480 */ 486 */
481#ifndef CONFIG_2BUFF_MODE
482typedef struct _RxD_block { 487typedef struct _RxD_block {
483#define MAX_RXDS_PER_BLOCK 127 488#define MAX_RXDS_PER_BLOCK_1 127
484 RxD_t rxd[MAX_RXDS_PER_BLOCK]; 489 RxD1_t rxd[MAX_RXDS_PER_BLOCK_1];
485 490
486 u64 reserved_0; 491 u64 reserved_0;
487#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 492#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
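
The new layout places the common descriptor header first in both RxD1_t and RxD3_t, which is what makes the (RxD1_t*)rxdp and (RxD3_t*)rxdp casts in the .c half of this patch well-defined. A minimal sketch with simplified field types:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint64_t Host_Control, Control_1, Control_2; } RxD_t;
typedef struct { RxD_t h; uint64_t Buffer0_ptr; } RxD1_t;
typedef struct { RxD_t h;
                 uint64_t Buffer0_ptr, Buffer1_ptr, Buffer2_ptr; } RxD3_t;

int main(void)
{
        RxD3_t d3 = { { 0, 0, 0 }, 1, 2, 3 };
        RxD_t *rxdp = &d3.h;             /* generic view             */
        RxD3_t *full = (RxD3_t *)rxdp;   /* safe: h is first member  */

        printf("%llu\n", (unsigned long long)full->Buffer2_ptr);
        return 0;
}
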
@@ -492,18 +497,13 @@ typedef struct _RxD_block {
492 * the upper 32 bits should 497 * the upper 32 bits should
493 * be 0 */ 498 * be 0 */
494} RxD_block_t; 499} RxD_block_t;
495#else
496typedef struct _RxD_block {
497#define MAX_RXDS_PER_BLOCK 85
498 RxD_t rxd[MAX_RXDS_PER_BLOCK];
499 500
500#define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL
501 u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd
502 * in this blk */
503 u64 pNext_RxD_Blk_physical; /* Phy pointer to next blk. */
504} RxD_block_t;
505#define SIZE_OF_BLOCK 4096 501#define SIZE_OF_BLOCK 4096
506 502
503#define RXD_MODE_1 0
504#define RXD_MODE_3A 1
505#define RXD_MODE_3B 2
506
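MAX_RXDS_PER_BLOCK_1 = 127 above, and the old 2-buffer count of 85, both fall straight out of the 4096-byte block: the tail of each block is reserved for the end-of-block marker and the next-block pointer. A sketch of the arithmetic, with struct sizes computed from the layouts above:

#include <stdio.h>

enum { SIZE_OF_BLOCK = 4096,
       RXD1_SIZE = 32,    /* 3 x u64 header + 1 buffer pointer  */
       RXD3_SIZE = 48 };  /* 3 x u64 header + 3 buffer pointers */

int main(void)
{
        /* 1-buffer mode: the trailer costs exactly one 32-byte slot */
        printf("1-buf RxDs/block: %d\n", SIZE_OF_BLOCK / RXD1_SIZE - 1);
        /* 3-buffer mode: 16 trailer bytes, then whole 48-byte slots */
        printf("3-buf RxDs/block: %d\n", (SIZE_OF_BLOCK - 16) / RXD3_SIZE);
        return 0;
}
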
507/* Structure to hold virtual addresses of Buf0 and Buf1 in 507/* Structure to hold virtual addresses of Buf0 and Buf1 in
508 * 2buf mode. */ 508 * 2buf mode. */
509typedef struct bufAdd { 509typedef struct bufAdd {
@@ -512,7 +512,6 @@ typedef struct bufAdd {
512 void *ba_0; 512 void *ba_0;
513 void *ba_1; 513 void *ba_1;
514} buffAdd_t; 514} buffAdd_t;
515#endif
516 515
517/* Structure which stores all the MAC control parameters */ 516/* Structure which stores all the MAC control parameters */
518 517
@@ -539,10 +538,17 @@ typedef struct {
539 538
540typedef tx_curr_get_info_t tx_curr_put_info_t; 539typedef tx_curr_get_info_t tx_curr_put_info_t;
541 540
541
542typedef struct rxd_info {
543 void *virt_addr;
544 dma_addr_t dma_addr;
545}rxd_info_t;
546
542/* Structure that holds the Phy and virt addresses of the Blocks */ 547/* Structure that holds the Phy and virt addresses of the Blocks */
543typedef struct rx_block_info { 548typedef struct rx_block_info {
544 RxD_t *block_virt_addr; 549 void *block_virt_addr;
545 dma_addr_t block_dma_addr; 550 dma_addr_t block_dma_addr;
551 rxd_info_t *rxds;
546} rx_block_info_t; 552} rx_block_info_t;
547 553
548/* pre declaration of the nic structure */ 554/* pre declaration of the nic structure */
@@ -578,10 +584,8 @@ typedef struct ring_info {
578 int put_pos; 584 int put_pos;
579#endif 585#endif
580 586
581#ifdef CONFIG_2BUFF_MODE
582 /* Buffer Address store. */ 587 /* Buffer Address store. */
583 buffAdd_t **ba; 588 buffAdd_t **ba;
584#endif
585 nic_t *nic; 589 nic_t *nic;
586} ring_info_t; 590} ring_info_t;
587 591
@@ -647,8 +651,6 @@ typedef struct {
647 651
648/* Default Tunable parameters of the NIC. */ 652/* Default Tunable parameters of the NIC. */
649#define DEFAULT_FIFO_LEN 4096 653#define DEFAULT_FIFO_LEN 4096
650#define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1)
651#define LARGE_RXD_CNT 100 * (MAX_RXDS_PER_BLOCK+1)
652#define SMALL_BLK_CNT 30 654#define SMALL_BLK_CNT 30
653#define LARGE_BLK_CNT 100 655#define LARGE_BLK_CNT 100
654 656
@@ -678,6 +680,7 @@ struct msix_info_st {
678 680
679/* Structure representing one instance of the NIC */ 681/* Structure representing one instance of the NIC */
680struct s2io_nic { 682struct s2io_nic {
683 int rxd_mode;
681#ifdef CONFIG_S2IO_NAPI 684#ifdef CONFIG_S2IO_NAPI
682 /* 685 /*
683 * Count of packets to be processed in a given iteration, it will be indicated 686 * Count of packets to be processed in a given iteration, it will be indicated
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index fd0167077fbe..110e777f206e 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -997,10 +997,7 @@ static void __devexit saa9730_remove_one(struct pci_dev *pdev)
997 997
998 if (dev) { 998 if (dev) {
999 unregister_netdev(dev); 999 unregister_netdev(dev);
1000 1000 kfree(dev->priv);
1001 if (dev->priv)
1002 kfree(dev->priv);
1003
1004 free_netdev(dev); 1001 free_netdev(dev);
1005 pci_release_regions(pdev); 1002 pci_release_regions(pdev);
1006 pci_disable_device(pdev); 1003 pci_disable_device(pdev);
@@ -1096,8 +1093,7 @@ static int lan_saa9730_init(struct net_device *dev, int ioaddr, int irq)
1096 return 0; 1093 return 0;
1097 1094
1098 out: 1095 out:
1099 if (dev->priv) 1096 kfree(dev->priv);
1100 kfree(dev->priv);
1101 return ret; 1097 return ret;
1102} 1098}
1103 1099
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 92f75529eff8..478791e09bf7 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -842,7 +842,7 @@ static void sis190_set_rx_mode(struct net_device *dev)
842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
843 i++, mclist = mclist->next) { 843 i++, mclist = mclist->next) {
844 int bit_nr = 844 int bit_nr =
845 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 845 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
847 rx_mode |= AcceptMulticast; 847 rx_mode |= AcceptMulticast;
848 } 848 }
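
The change takes the hash index from the low 6 bits of the CRC (& 0x3f) instead of the top 6 (>> 26); the word/bit split into the filter is unchanged. A standalone sketch with a stand-in CRC value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t crc = 0xdeadbeef;      /* would come from ether_crc() */
        uint32_t mc_filter[2] = { 0, 0 };
        int bit_nr = crc & 0x3f;        /* low 6 bits: 0..63           */

        mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
        printf("bit %d -> word %d, mask 0x%08x\n",
               bit_nr, bit_nr >> 5, mc_filter[bit_nr >> 5]);
        return 0;
}
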
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 23b713c700b3..1d4d88680db1 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1696,15 +1696,20 @@ static int sis900_rx(struct net_device *net_dev)
1696 long ioaddr = net_dev->base_addr; 1696 long ioaddr = net_dev->base_addr;
1697 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; 1697 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
1698 u32 rx_status = sis_priv->rx_ring[entry].cmdsts; 1698 u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
1699 int rx_work_limit;
1699 1700
1700 if (netif_msg_rx_status(sis_priv)) 1701 if (netif_msg_rx_status(sis_priv))
1701 printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d " 1702 printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
1702 "status:0x%8.8x\n", 1703 "status:0x%8.8x\n",
1703 sis_priv->cur_rx, sis_priv->dirty_rx, rx_status); 1704 sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
1705 rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx;
1704 1706
1705 while (rx_status & OWN) { 1707 while (rx_status & OWN) {
1706 unsigned int rx_size; 1708 unsigned int rx_size;
1707 1709
1710 if (--rx_work_limit < 0)
1711 break;
1712
1708 rx_size = (rx_status & DSIZE) - CRC_SIZE; 1713 rx_size = (rx_status & DSIZE) - CRC_SIZE;
1709 1714
1710 if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) { 1715 if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
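
rx_work_limit bounds the handler to the number of descriptors outstanding when it entered, so a descriptor whose OWN bit never clears can no longer spin the loop forever. A toy model with sample counter values:

#include <stdio.h>

enum { NUM_RX_DESC = 16 };

int main(void)
{
        unsigned int cur_rx = 37, dirty_rx = 25;  /* sample counters */
        int rx_work_limit = dirty_rx + NUM_RX_DESC - cur_rx;
        int handled = 0;

        for (;;) {           /* stands in for while (rx_status & OWN) */
                if (--rx_work_limit < 0)
                        break;
                handled++;
        }
        printf("handled %d descriptors this pass\n", handled);
        return 0;
}
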
@@ -1732,9 +1737,11 @@ static int sis900_rx(struct net_device *net_dev)
1732 we are working on NULL sk_buff :-( */ 1737 we are working on NULL sk_buff :-( */
1733 if (sis_priv->rx_skbuff[entry] == NULL) { 1738 if (sis_priv->rx_skbuff[entry] == NULL) {
1734 if (netif_msg_rx_err(sis_priv)) 1739 if (netif_msg_rx_err(sis_priv))
1735 printk(KERN_INFO "%s: NULL pointer " 1740 printk(KERN_WARNING "%s: NULL pointer "
1736 "encountered in Rx ring, skipping\n", 1741 "encountered in Rx ring\n"
1737 net_dev->name); 1742 "cur_rx:%4.4d, dirty_rx:%4.4d\n",
1743 net_dev->name, sis_priv->cur_rx,
1744 sis_priv->dirty_rx);
1738 break; 1745 break;
1739 } 1746 }
1740 1747
@@ -1770,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
1770 sis_priv->rx_ring[entry].cmdsts = 0; 1777 sis_priv->rx_ring[entry].cmdsts = 0;
1771 sis_priv->rx_ring[entry].bufptr = 0; 1778 sis_priv->rx_ring[entry].bufptr = 0;
1772 sis_priv->stats.rx_dropped++; 1779 sis_priv->stats.rx_dropped++;
1780 sis_priv->cur_rx++;
1773 break; 1781 break;
1774 } 1782 }
1775 skb->dev = net_dev; 1783 skb->dev = net_dev;
@@ -1787,7 +1795,7 @@ static int sis900_rx(struct net_device *net_dev)
1787 1795
1788 /* refill the Rx buffer, what if the rate of refilling is slower 1796 /* refill the Rx buffer, what if the rate of refilling is slower
1789 * than consuming ?? */ 1797 * than consuming ?? */
1790 for (;sis_priv->cur_rx - sis_priv->dirty_rx > 0; sis_priv->dirty_rx++) { 1798 for (; sis_priv->cur_rx != sis_priv->dirty_rx; sis_priv->dirty_rx++) {
1791 struct sk_buff *skb; 1799 struct sk_buff *skb;
1792 1800
1793 entry = sis_priv->dirty_rx % NUM_RX_DESC; 1801 entry = sis_priv->dirty_rx % NUM_RX_DESC;
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index f17c05cbe44b..99a776a51fb5 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -1896,7 +1896,7 @@ void smt_swap_para(struct smt_header *sm, int len, int direction)
1896 1896
1897static void smt_string_swap(char *data, const char *format, int len) 1897static void smt_string_swap(char *data, const char *format, int len)
1898{ 1898{
1899 const char *open_paren = 0 ; 1899 const char *open_paren = NULL ;
1900 int x ; 1900 int x ;
1901 1901
1902 while (len > 0 && *format) { 1902 while (len > 0 && *format) {
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 1438fdd20826..74d5f1a6fdea 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -77,7 +77,7 @@ static const char version[] =
77#include <linux/errno.h> 77#include <linux/errno.h>
78#include <linux/ioport.h> 78#include <linux/ioport.h>
79#include <linux/crc32.h> 79#include <linux/crc32.h>
80#include <linux/device.h> 80#include <linux/platform_device.h>
81#include <linux/spinlock.h> 81#include <linux/spinlock.h>
82#include <linux/ethtool.h> 82#include <linux/ethtool.h>
83#include <linux/mii.h> 83#include <linux/mii.h>
@@ -1983,6 +1983,10 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
1983 if (lp->version >= (CHIP_91100 << 4)) 1983 if (lp->version >= (CHIP_91100 << 4))
1984 smc_phy_detect(dev); 1984 smc_phy_detect(dev);
1985 1985
1986 /* then shut everything down to save power */
1987 smc_shutdown(dev);
1988 smc_phy_powerdown(dev);
1989
1986 /* Set default parameters */ 1990 /* Set default parameters */
1987 lp->msg_enable = NETIF_MSG_LINK; 1991 lp->msg_enable = NETIF_MSG_LINK;
1988 lp->ctl_rfduplx = 0; 1992 lp->ctl_rfduplx = 0;
@@ -2291,11 +2295,11 @@ static int smc_drv_remove(struct device *dev)
2291 return 0; 2295 return 0;
2292} 2296}
2293 2297
2294static int smc_drv_suspend(struct device *dev, pm_message_t state, u32 level) 2298static int smc_drv_suspend(struct device *dev, pm_message_t state)
2295{ 2299{
2296 struct net_device *ndev = dev_get_drvdata(dev); 2300 struct net_device *ndev = dev_get_drvdata(dev);
2297 2301
2298 if (ndev && level == SUSPEND_DISABLE) { 2302 if (ndev) {
2299 if (netif_running(ndev)) { 2303 if (netif_running(ndev)) {
2300 netif_device_detach(ndev); 2304 netif_device_detach(ndev);
2301 smc_shutdown(ndev); 2305 smc_shutdown(ndev);
@@ -2305,12 +2309,12 @@ static int smc_drv_suspend(struct device *dev, pm_message_t state, u32 level)
2305 return 0; 2309 return 0;
2306} 2310}
2307 2311
2308static int smc_drv_resume(struct device *dev, u32 level) 2312static int smc_drv_resume(struct device *dev)
2309{ 2313{
2310 struct platform_device *pdev = to_platform_device(dev); 2314 struct platform_device *pdev = to_platform_device(dev);
2311 struct net_device *ndev = dev_get_drvdata(dev); 2315 struct net_device *ndev = dev_get_drvdata(dev);
2312 2316
2313 if (ndev && level == RESUME_ENABLE) { 2317 if (ndev) {
2314 struct smc_local *lp = netdev_priv(ndev); 2318 struct smc_local *lp = netdev_priv(ndev);
2315 smc_enable_device(pdev); 2319 smc_enable_device(pdev);
2316 if (netif_running(ndev)) { 2320 if (netif_running(ndev)) {
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index ac9ce6509eee..817f200742c3 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -230,12 +230,12 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
230#define SMC_CAN_USE_16BIT 1 230#define SMC_CAN_USE_16BIT 1
231#define SMC_CAN_USE_32BIT 0 231#define SMC_CAN_USE_32BIT 0
232 232
233#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000) 233#define SMC_inb(a, r) inb(((u32)a) + (r))
234#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000) 234#define SMC_inw(a, r) inw(((u32)a) + (r))
235#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000) 235#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r))
236#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000) 236#define SMC_outw(v, a, r) outw(v, ((u32)a) + (r))
237#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) 237#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l)
238#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) 238#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l)
239 239
240#define set_irq_type(irq, type) do {} while(0) 240#define set_irq_type(irq, type) do {} while(0)
241 241
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index efdb179ecc8c..38b2b0a3ce96 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1091,8 +1091,10 @@ static int netdev_open(struct net_device *dev)
1091 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE; 1091 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
1092 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; 1092 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
1093 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); 1093 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
1094 if (np->queue_mem == 0) 1094 if (np->queue_mem == NULL) {
1095 free_irq(dev->irq, dev);
1095 return -ENOMEM; 1096 return -ENOMEM;
1097 }
1096 1098
1097 np->tx_done_q = np->queue_mem; 1099 np->tx_done_q = np->queue_mem;
1098 np->tx_done_q_dma = np->queue_mem_dma; 1100 np->tx_done_q_dma = np->queue_mem_dma;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 5de0554fd7c6..0ab9c38b4a34 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -80,7 +80,7 @@
80 I/O access could affect performance in ARM-based system 80 I/O access could affect performance in ARM-based system
81 - Add Linux software VLAN support 81 - Add Linux software VLAN support
82 82
83 Version LK1.08 (D-Link): 83 Version LK1.08 (Philippe De Muyter phdm@macqel.be):
84 - Fix bug of custom mac address 84 - Fix bug of custom mac address
85 (StationAddr register only accept word write) 85 (StationAddr register only accept word write)
86 86
@@ -91,11 +91,14 @@
91 Version LK1.09a (ICPlus): 91 Version LK1.09a (ICPlus):
92 - Add the delay time in reading the contents of EEPROM 92 - Add the delay time in reading the contents of EEPROM
93 93
94 Version LK1.10 (Philippe De Muyter phdm@macqel.be):
95 - Make 'unblock interface after Tx underrun' work
96
94*/ 97*/
95 98
96#define DRV_NAME "sundance" 99#define DRV_NAME "sundance"
97#define DRV_VERSION "1.01+LK1.09a" 100#define DRV_VERSION "1.01+LK1.10"
98#define DRV_RELDATE "10-Jul-2003" 101#define DRV_RELDATE "28-Oct-2005"
99 102
100 103
101/* The user-configurable values. 104/* The user-configurable values.
@@ -263,8 +266,10 @@ IV. Notes
263IVb. References 266IVb. References
264 267
265The Sundance ST201 datasheet, preliminary version. 268The Sundance ST201 datasheet, preliminary version.
266http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html 269The Kendin KS8723 datasheet, preliminary version.
267http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html 270The ICplus IP100 datasheet, preliminary version.
271http://www.scyld.com/expert/100mbps.html
272http://www.scyld.com/expert/NWay.html
268 273
269IVc. Errata 274IVc. Errata
270 275
@@ -500,6 +505,25 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500static int netdev_close(struct net_device *dev); 505static int netdev_close(struct net_device *dev);
501static struct ethtool_ops ethtool_ops; 506static struct ethtool_ops ethtool_ops;
502 507
508static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
509{
510 struct netdev_private *np = netdev_priv(dev);
511 void __iomem *ioaddr = np->base + ASICCtrl;
512 int countdown;
513
514 /* ST201 documentation states ASICCtrl is a 32bit register */
515 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
516 /* ST201 documentation states reset can take up to 1 ms */
517 countdown = 10 + 1;
518 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
519 if (--countdown == 0) {
520 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
521 break;
522 }
523 udelay(100);
524 }
525}
526
503static int __devinit sundance_probe1 (struct pci_dev *pdev, 527static int __devinit sundance_probe1 (struct pci_dev *pdev,
504 const struct pci_device_id *ent) 528 const struct pci_device_id *ent)
505{ 529{
@@ -1190,23 +1214,33 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
1190 ("%s: Transmit status is %2.2x.\n", 1214 ("%s: Transmit status is %2.2x.\n",
1191 dev->name, tx_status); 1215 dev->name, tx_status);
1192 if (tx_status & 0x1e) { 1216 if (tx_status & 0x1e) {
1217 if (netif_msg_tx_err(np))
1218 printk("%s: Transmit error status %4.4x.\n",
1219 dev->name, tx_status);
1193 np->stats.tx_errors++; 1220 np->stats.tx_errors++;
1194 if (tx_status & 0x10) 1221 if (tx_status & 0x10)
1195 np->stats.tx_fifo_errors++; 1222 np->stats.tx_fifo_errors++;
1196 if (tx_status & 0x08) 1223 if (tx_status & 0x08)
1197 np->stats.collisions++; 1224 np->stats.collisions++;
1225 if (tx_status & 0x04)
1226 np->stats.tx_fifo_errors++;
1198 if (tx_status & 0x02) 1227 if (tx_status & 0x02)
1199 np->stats.tx_window_errors++; 1228 np->stats.tx_window_errors++;
1200 /* This reset has not been verified!. */ 1229 /*
1201 if (tx_status & 0x10) { /* Reset the Tx. */ 1230 ** This reset has been verified on
1202 np->stats.tx_fifo_errors++; 1231 ** DFE-580TX boards ! phdm@macqel.be.
1203 spin_lock(&np->lock); 1232 */
1204 reset_tx(dev); 1233 if (tx_status & 0x10) { /* TxUnderrun */
1205 spin_unlock(&np->lock); 1234 unsigned short txthreshold;
1235
1236 txthreshold = ioread16 (ioaddr + TxStartThresh);
1237 /* Restart Tx FIFO and transmitter */
1238 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1239 iowrite16 (txthreshold, ioaddr + TxStartThresh);
1240 /* No need to reset the Tx pointer here */
1206 } 1241 }
1207 if (tx_status & 0x1e) /* Restart the Tx. */ 1242 /* Restart the Tx. */
1208 iowrite16 (TxEnable, 1243 iowrite16 (TxEnable, ioaddr + MACCtrl1);
1209 ioaddr + MACCtrl1);
1210 } 1244 }
1211 /* Yup, this is a documentation bug. It cost me *hours*. */ 1245 /* Yup, this is a documentation bug. It cost me *hours*. */
1212 iowrite16 (0, ioaddr + TxStatus); 1246 iowrite16 (0, ioaddr + TxStatus);
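Taken together, the new Tx-underrun path above performs a full FIFO/transmitter reset in place of the old, unverified reset_tx() call: it saves the current Tx start threshold, pulses the reset bits (which live in the upper halfword of the 32-bit ASICCtrl register) through sundance_reset(), restores the threshold, and re-enables the MAC. Condensed into one helper it would read roughly like this (sketch, not the driver's actual structure):

    static void tx_underrun_recover(struct net_device *dev, void __iomem *ioaddr)
    {
            u16 threshold = ioread16(ioaddr + TxStartThresh);

            /* reset Tx FIFO, Tx state machine and network logic */
            sundance_reset(dev, (NetworkReset | FIFOReset | TxReset) << 16);

            iowrite16(threshold, ioaddr + TxStartThresh);   /* restore */
            iowrite16(TxEnable, ioaddr + MACCtrl1);         /* restart Tx */
    }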
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 1802c3b48799..1828a6bf8458 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -37,6 +37,7 @@
37#include <linux/tcp.h> 37#include <linux/tcp.h>
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/prefetch.h> 39#include <linux/prefetch.h>
40#include <linux/dma-mapping.h>
40 41
41#include <net/checksum.h> 42#include <net/checksum.h>
42 43
@@ -67,8 +68,8 @@
67 68
68#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
70#define DRV_MODULE_VERSION "3.42" 71#define DRV_MODULE_VERSION "3.43"
71#define DRV_MODULE_RELDATE "Oct 3, 2005" 72#define DRV_MODULE_RELDATE "Oct 24, 2005"
72 73
73#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -219,6 +220,10 @@ static struct pci_device_id tg3_pci_tbl[] = {
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, 221 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, 227 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, 229 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
@@ -466,6 +471,15 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
466 spin_unlock_irqrestore(&tp->indirect_lock, flags); 471 spin_unlock_irqrestore(&tp->indirect_lock, flags);
467} 472}
468 473
474static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
475{
476 /* If no workaround is needed, write to mem space directly */
477 if (tp->write32 != tg3_write_indirect_reg32)
478 tw32(NIC_SRAM_WIN_BASE + off, val);
479 else
480 tg3_write_mem(tp, off, val);
481}
482
469static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) 483static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
470{ 484{
471 unsigned long flags; 485 unsigned long flags;
@@ -570,7 +584,7 @@ static void tg3_switch_clocks(struct tg3 *tp)
570 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); 584 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
571 u32 orig_clock_ctrl; 585 u32 orig_clock_ctrl;
572 586
573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 587 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
574 return; 588 return;
575 589
576 orig_clock_ctrl = clock_ctrl; 590 orig_clock_ctrl = clock_ctrl;
@@ -1210,7 +1224,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1210 CLOCK_CTRL_ALTCLK | 1224 CLOCK_CTRL_ALTCLK |
1211 CLOCK_CTRL_PWRDOWN_PLL133); 1225 CLOCK_CTRL_PWRDOWN_PLL133);
1212 udelay(40); 1226 udelay(40);
1213 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 1227 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1214 /* do nothing */ 1228 /* do nothing */
1215 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 1229 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1216 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { 1230 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
@@ -3712,14 +3726,14 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3712 dev->mtu = new_mtu; 3726 dev->mtu = new_mtu;
3713 3727
3714 if (new_mtu > ETH_DATA_LEN) { 3728 if (new_mtu > ETH_DATA_LEN) {
3715 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 3729 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3716 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 3730 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3717 ethtool_op_set_tso(dev, 0); 3731 ethtool_op_set_tso(dev, 0);
3718 } 3732 }
3719 else 3733 else
3720 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 3734 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3721 } else { 3735 } else {
3722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 3736 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3723 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 3737 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3724 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; 3738 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3725 } 3739 }
@@ -3850,7 +3864,7 @@ static void tg3_init_rings(struct tg3 *tp)
3850 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); 3864 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3851 3865
3852 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; 3866 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3853 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) && 3867 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3854 (tp->dev->mtu > ETH_DATA_LEN)) 3868 (tp->dev->mtu > ETH_DATA_LEN))
3855 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; 3869 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3856 3870
@@ -3905,10 +3919,8 @@ static void tg3_init_rings(struct tg3 *tp)
3905 */ 3919 */
3906static void tg3_free_consistent(struct tg3 *tp) 3920static void tg3_free_consistent(struct tg3 *tp)
3907{ 3921{
3908 if (tp->rx_std_buffers) { 3922 kfree(tp->rx_std_buffers);
3909 kfree(tp->rx_std_buffers); 3923 tp->rx_std_buffers = NULL;
3910 tp->rx_std_buffers = NULL;
3911 }
3912 if (tp->rx_std) { 3924 if (tp->rx_std) {
3913 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, 3925 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3914 tp->rx_std, tp->rx_std_mapping); 3926 tp->rx_std, tp->rx_std_mapping);
@@ -4347,7 +4359,7 @@ static int tg3_chip_reset(struct tg3 *tp)
4347 val &= ~PCIX_CAPS_RELAXED_ORDERING; 4359 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4348 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); 4360 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4349 4361
4350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 4362 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4351 u32 val; 4363 u32 val;
4352 4364
4353 /* Chip reset on 5780 will reset MSI enable bit, 4365 /* Chip reset on 5780 will reset MSI enable bit,
@@ -6003,7 +6015,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6003 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 6015 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6004 6016
6005 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 6017 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6006 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)) 6018 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6007 limit = 8; 6019 limit = 8;
6008 else 6020 else
6009 limit = 16; 6021 limit = 16;
@@ -6191,14 +6203,16 @@ static void tg3_timer(unsigned long __opaque)
6191 tp->timer_counter = tp->timer_multiplier; 6203 tp->timer_counter = tp->timer_multiplier;
6192 } 6204 }
6193 6205
6194 /* Heartbeat is only sent once every 120 seconds. */ 6206 /* Heartbeat is only sent once every 2 seconds. */
6195 if (!--tp->asf_counter) { 6207 if (!--tp->asf_counter) {
6196 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 6208 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6197 u32 val; 6209 u32 val;
6198 6210
6199 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE); 6211 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6200 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 6212 FWCMD_NICDRV_ALIVE2);
6201 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3); 6213 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6214 /* 5 seconds timeout */
6215 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6202 val = tr32(GRC_RX_CPU_EVENT); 6216 val = tr32(GRC_RX_CPU_EVENT);
6203 val |= (1 << 14); 6217 val |= (1 << 14);
6204 tw32(GRC_RX_CPU_EVENT, val); 6218 tw32(GRC_RX_CPU_EVENT, val);
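The heartbeat rework above changes both the message and its cadence: the driver timer fires every tp->timer_offset jiffies, so an asf_counter of (HZ / tp->timer_offset) * 2 ticks expires roughly every 2 seconds (the tg3_open() hunk below updates the multiplier to match), and the value 5 written to the data mailbox advertises a 5-second deadline to the ASF firmware. The arithmetic, as a comment sketch:

    /* timer period = tp->timer_offset jiffies
     *   => HZ / tp->timer_offset timer ticks per second
     *   => (HZ / tp->timer_offset) * 2 ticks ~= 2 seconds
     * FWCMD_NICDRV_ALIVE2 with a data value of 5 means the firmware
     * tolerates roughly two missed heartbeats before it concludes
     * the driver has died. */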
@@ -6409,7 +6423,7 @@ static int tg3_open(struct net_device *dev)
6409 tp->timer_counter = tp->timer_multiplier = 6423 tp->timer_counter = tp->timer_multiplier =
6410 (HZ / tp->timer_offset); 6424 (HZ / tp->timer_offset);
6411 tp->asf_counter = tp->asf_multiplier = 6425 tp->asf_counter = tp->asf_multiplier =
6412 ((HZ / tp->timer_offset) * 120); 6426 ((HZ / tp->timer_offset) * 2);
6413 6427
6414 init_timer(&tp->timer); 6428 init_timer(&tp->timer);
6415 tp->timer.expires = jiffies + tp->timer_offset; 6429 tp->timer.expires = jiffies + tp->timer_offset;
@@ -7237,7 +7251,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7237 cmd->supported |= (SUPPORTED_1000baseT_Half | 7251 cmd->supported |= (SUPPORTED_1000baseT_Half |
7238 SUPPORTED_1000baseT_Full); 7252 SUPPORTED_1000baseT_Full);
7239 7253
7240 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) 7254 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7241 cmd->supported |= (SUPPORTED_100baseT_Half | 7255 cmd->supported |= (SUPPORTED_100baseT_Half |
7242 SUPPORTED_100baseT_Full | 7256 SUPPORTED_100baseT_Full |
7243 SUPPORTED_10baseT_Half | 7257 SUPPORTED_10baseT_Half |
@@ -7264,7 +7278,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7264{ 7278{
7265 struct tg3 *tp = netdev_priv(dev); 7279 struct tg3 *tp = netdev_priv(dev);
7266 7280
7267 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 7281 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7268 /* These are the only valid advertisement bits allowed. */ 7282 /* These are the only valid advertisement bits allowed. */
7269 if (cmd->autoneg == AUTONEG_ENABLE && 7283 if (cmd->autoneg == AUTONEG_ENABLE &&
7270 (cmd->advertising & ~(ADVERTISED_1000baseT_Half | 7284 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
@@ -7272,7 +7286,17 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7272 ADVERTISED_Autoneg | 7286 ADVERTISED_Autoneg |
7273 ADVERTISED_FIBRE))) 7287 ADVERTISED_FIBRE)))
7274 return -EINVAL; 7288 return -EINVAL;
7275 } 7289 /* Fiber can only do SPEED_1000. */
7290 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7291 (cmd->speed != SPEED_1000))
7292 return -EINVAL;
7293 /* Copper cannot force SPEED_1000. */
7294 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7295 (cmd->speed == SPEED_1000))
7296 return -EINVAL;
7297 else if ((cmd->speed == SPEED_1000) &&
7298 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7299 return -EINVAL;
7276 7300
7277 tg3_full_lock(tp, 0); 7301 tg3_full_lock(tp, 0);
7278 7302
@@ -8380,7 +8404,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8380 } 8404 }
8381 8405
8382 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || 8406 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8383 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)) { 8407 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8384 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 8408 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8385 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 8409 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8386 tp->nvram_jedecnum = JEDEC_ATMEL; 8410 tp->nvram_jedecnum = JEDEC_ATMEL;
@@ -8980,7 +9004,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8980 9004
8981 tp->phy_id = eeprom_phy_id; 9005 tp->phy_id = eeprom_phy_id;
8982 if (eeprom_phy_serdes) { 9006 if (eeprom_phy_serdes) {
8983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9007 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
8984 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; 9008 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8985 else 9009 else
8986 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 9010 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
@@ -9393,8 +9417,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9393 } 9417 }
9394 9418
9395 /* Find msi capability. */ 9419 /* Find msi capability. */
9396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9420 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9422 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9397 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 9423 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9424 }
9398 9425
9399 /* Initialize misc host control in PCI block. */ 9426 /* Initialize misc host control in PCI block. */
9400 tp->misc_host_ctrl |= (misc_ctrl_reg & 9427 tp->misc_host_ctrl |= (misc_ctrl_reg &
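This is where the new TG3_FLG2_5780_CLASS flag is derived: 5780 and 5714 are classified once in tg3_get_invariants(), and every former GET_ASIC_REV(...) == ASIC_REV_5780 comparison elsewhere in the file becomes a flag test, so adding a future class member only requires touching this one hunk. The pattern, reduced to its essentials:

    /* classify once ... */
    if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
        GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
            tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;

    /* ... test the flag everywhere else */
    if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
            /* 5780/5714-specific handling */;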
@@ -9412,7 +9439,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9412 9439
9413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 9440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9414 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 9441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9415 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9442 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9416 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 9443 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9417 9444
9418 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || 9445 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
@@ -9607,7 +9634,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9607 * ether_setup() via the alloc_etherdev() call 9634 * ether_setup() via the alloc_etherdev() call
9608 */ 9635 */
9609 if (tp->dev->mtu > ETH_DATA_LEN && 9636 if (tp->dev->mtu > ETH_DATA_LEN &&
9610 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780) 9637 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9611 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 9638 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9612 9639
9613 /* Determine WakeOnLan speed to use. */ 9640 /* Determine WakeOnLan speed to use. */
@@ -9830,7 +9857,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
9830 mac_offset = 0x7c; 9857 mac_offset = 0x7c;
9831 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 9858 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9832 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) || 9859 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 9860 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9834 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 9861 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9835 mac_offset = 0xcc; 9862 mac_offset = 0xcc;
9836 if (tg3_nvram_lock(tp)) 9863 if (tg3_nvram_lock(tp))
@@ -10148,6 +10175,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
10148 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 10175 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10149 /* 5780 always in PCIX mode */ 10176 /* 5780 always in PCIX mode */
10150 tp->dma_rwctrl |= 0x00144000; 10177 tp->dma_rwctrl |= 0x00144000;
10178 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10179 /* 5714 always in PCIX mode */
10180 tp->dma_rwctrl |= 0x00148000;
10151 } else { 10181 } else {
10152 tp->dma_rwctrl |= 0x001b000f; 10182 tp->dma_rwctrl |= 0x001b000f;
10153 } 10183 }
@@ -10347,6 +10377,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
10347 case PHY_ID_BCM5705: return "5705"; 10377 case PHY_ID_BCM5705: return "5705";
10348 case PHY_ID_BCM5750: return "5750"; 10378 case PHY_ID_BCM5750: return "5750";
10349 case PHY_ID_BCM5752: return "5752"; 10379 case PHY_ID_BCM5752: return "5752";
10380 case PHY_ID_BCM5714: return "5714";
10350 case PHY_ID_BCM5780: return "5780"; 10381 case PHY_ID_BCM5780: return "5780";
10351 case PHY_ID_BCM8002: return "8002/serdes"; 10382 case PHY_ID_BCM8002: return "8002/serdes";
10352 case 0: return "serdes"; 10383 case 0: return "serdes";
@@ -10492,17 +10523,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10492 } 10523 }
10493 10524
10494 /* Configure DMA attributes. */ 10525 /* Configure DMA attributes. */
10495 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL); 10526 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
10496 if (!err) { 10527 if (!err) {
10497 pci_using_dac = 1; 10528 pci_using_dac = 1;
10498 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); 10529 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
10499 if (err < 0) { 10530 if (err < 0) {
10500 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA " 10531 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10501 "for consistent allocations\n"); 10532 "for consistent allocations\n");
10502 goto err_out_free_res; 10533 goto err_out_free_res;
10503 } 10534 }
10504 } else { 10535 } else {
10505 err = pci_set_dma_mask(pdev, 0xffffffffULL); 10536 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10506 if (err) { 10537 if (err) {
10507 printk(KERN_ERR PFX "No usable DMA configuration, " 10538 printk(KERN_ERR PFX "No usable DMA configuration, "
10508 "aborting.\n"); 10539 "aborting.\n");
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 2e733c60bfa4..fb7e2a5f4a08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -137,6 +137,7 @@
137#define ASIC_REV_5750 0x04 137#define ASIC_REV_5750 0x04
138#define ASIC_REV_5752 0x06 138#define ASIC_REV_5752 0x06
139#define ASIC_REV_5780 0x08 139#define ASIC_REV_5780 0x08
140#define ASIC_REV_5714 0x09
140#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 141#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
141#define CHIPREV_5700_AX 0x70 142#define CHIPREV_5700_AX 0x70
142#define CHIPREV_5700_BX 0x71 143#define CHIPREV_5700_BX 0x71
@@ -531,6 +532,8 @@
531#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 532#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
532#define MAC_SERDES_STAT 0x00000594 533#define MAC_SERDES_STAT 0x00000594
533/* 0x598 --> 0x5b0 unused */ 534/* 0x598 --> 0x5b0 unused */
535#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */
536#define SERDES_RX_SIG_DETECT 0x00000400
534#define SG_DIG_CTRL 0x000005b0 537#define SG_DIG_CTRL 0x000005b0
535#define SG_DIG_USING_HW_AUTONEG 0x80000000 538#define SG_DIG_USING_HW_AUTONEG 0x80000000
536#define SG_DIG_SOFT_RESET 0x40000000 539#define SG_DIG_SOFT_RESET 0x40000000
@@ -1329,6 +1332,8 @@
1329#define GRC_LCLCTRL_CLEARINT 0x00000002 1332#define GRC_LCLCTRL_CLEARINT 0x00000002
1330#define GRC_LCLCTRL_SETINT 0x00000004 1333#define GRC_LCLCTRL_SETINT 0x00000004
1331#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008 1334#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008
1335#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */
1336#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */
1332#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020 1337#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020
1333#define GRC_LCLCTRL_GPIO_OE3 0x00000040 1338#define GRC_LCLCTRL_GPIO_OE3 0x00000040
1334#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080 1339#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080
@@ -1507,6 +1512,7 @@
1507#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004 1512#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004
1508#define FWCMD_NICDRV_FIX_DMAR 0x00000005 1513#define FWCMD_NICDRV_FIX_DMAR 0x00000005
1509#define FWCMD_NICDRV_FIX_DMAW 0x00000006 1514#define FWCMD_NICDRV_FIX_DMAW 0x00000006
1515#define FWCMD_NICDRV_ALIVE2 0x0000000d
1510#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c 1516#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c
1511#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80 1517#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80
1512#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00 1518#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00
@@ -2175,6 +2181,7 @@ struct tg3 {
2175 TG3_FLG2_MII_SERDES) 2181 TG3_FLG2_MII_SERDES)
2176#define TG3_FLG2_PARALLEL_DETECT 0x01000000 2182#define TG3_FLG2_PARALLEL_DETECT 0x01000000
2177#define TG3_FLG2_ICH_WORKAROUND 0x02000000 2183#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2184#define TG3_FLG2_5780_CLASS 0x04000000
2178 2185
2179 u32 split_mode_max_reqs; 2186 u32 split_mode_max_reqs;
2180#define SPLIT_MODE_5704_MAX_REQ 3 2187#define SPLIT_MODE_5704_MAX_REQ 3
@@ -2222,6 +2229,7 @@ struct tg3 {
2222#define PHY_ID_BCM5705 0x600081a0 2229#define PHY_ID_BCM5705 0x600081a0
2223#define PHY_ID_BCM5750 0x60008180 2230#define PHY_ID_BCM5750 0x60008180
2224#define PHY_ID_BCM5752 0x60008100 2231#define PHY_ID_BCM5752 0x60008100
2232#define PHY_ID_BCM5714 0x60008340
2225#define PHY_ID_BCM5780 0x60008350 2233#define PHY_ID_BCM5780 0x60008350
2226#define PHY_ID_BCM8002 0x60010140 2234#define PHY_ID_BCM8002 0x60010140
2227#define PHY_ID_INVALID 0xffffffff 2235#define PHY_ID_INVALID 0xffffffff
@@ -2246,8 +2254,8 @@ struct tg3 {
2246 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \ 2254 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
2247 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ 2255 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
2248 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ 2256 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
2249 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5780 || \ 2257 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
2250 (X) == PHY_ID_BCM8002) 2258 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002)
2251 2259
2252 struct tg3_hw_stats *hw_stats; 2260 struct tg3_hw_stats *hw_stats;
2253 dma_addr_t stats_mapping; 2261 dma_addr_t stats_mapping;
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
index eb1423ede75c..d04c918ebef8 100644
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -29,6 +29,7 @@ static const char version[] = "proteon.c: v1.00 02/01/2003 by Jochen Friedrich\n
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/trdevice.h> 31#include <linux/trdevice.h>
32#include <linux/platform_device.h>
32 33
33#include <asm/system.h> 34#include <asm/system.h>
34#include <asm/io.h> 35#include <asm/io.h>
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
index 3c7c66204f74..72cf708396be 100644
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -36,6 +36,7 @@ static const char version[] = "skisa.c: v1.03 09/12/2002 by Jochen Friedrich\n";
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/netdevice.h> 37#include <linux/netdevice.h>
38#include <linux/trdevice.h> 38#include <linux/trdevice.h>
39#include <linux/platform_device.h>
39 40
40#include <asm/system.h> 41#include <asm/system.h>
41#include <asm/io.h> 42#include <asm/io.h>
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 6b8eee8f7bfd..d7fb3ffe06ac 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2076,8 +2076,7 @@ static int __init de_init_one (struct pci_dev *pdev,
2076 return 0; 2076 return 0;
2077 2077
2078err_out_iomap: 2078err_out_iomap:
2079 if (de->ee_data) 2079 kfree(de->ee_data);
2080 kfree(de->ee_data);
2081 iounmap(regs); 2080 iounmap(regs);
2082err_out_res: 2081err_out_res:
2083 pci_release_regions(pdev); 2082 pci_release_regions(pdev);
@@ -2096,8 +2095,7 @@ static void __exit de_remove_one (struct pci_dev *pdev)
2096 if (!dev) 2095 if (!dev)
2097 BUG(); 2096 BUG();
2098 unregister_netdev(dev); 2097 unregister_netdev(dev);
2099 if (de->ee_data) 2098 kfree(de->ee_data);
2100 kfree(de->ee_data);
2101 iounmap(de->regs); 2099 iounmap(de->regs);
2102 pci_release_regions(pdev); 2100 pci_release_regions(pdev);
2103 pci_disable_device(pdev); 2101 pci_disable_device(pdev);
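This and the remaining cleanups in this patch set (tulip_core, via-velocity, airo, airo_cs, atmel, atmel_cs, hostap, prism54, strip) all rely on the same guarantee: kfree(NULL) is defined to be a no-op, so the NULL guard is dead weight. The transformation is mechanical:

    /* before */
    if (de->ee_data)
            kfree(de->ee_data);

    /* after: kfree() already ignores NULL */
    kfree(de->ee_data);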
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 6266a9a7e6e3..125ed00e95a5 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1727,8 +1727,7 @@ err_out_free_ring:
1727 tp->rx_ring, tp->rx_ring_dma); 1727 tp->rx_ring, tp->rx_ring_dma);
1728 1728
1729err_out_mtable: 1729err_out_mtable:
1730 if (tp->mtable) 1730 kfree (tp->mtable);
1731 kfree (tp->mtable);
1732 pci_iounmap(pdev, ioaddr); 1731 pci_iounmap(pdev, ioaddr);
1733 1732
1734err_out_free_res: 1733err_out_free_res:
@@ -1806,8 +1805,7 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
1806 sizeof (struct tulip_rx_desc) * RX_RING_SIZE + 1805 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1807 sizeof (struct tulip_tx_desc) * TX_RING_SIZE, 1806 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1808 tp->rx_ring, tp->rx_ring_dma); 1807 tp->rx_ring, tp->rx_ring_dma);
1809 if (tp->mtable) 1808 kfree (tp->mtable);
1810 kfree (tp->mtable);
1811 pci_iounmap(pdev, tp->base_addr); 1809 pci_iounmap(pdev, tp->base_addr);
1812 free_netdev (dev); 1810 free_netdev (dev);
1813 pci_release_regions (pdev); 1811 pci_release_regions (pdev);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index abc5cee6eedc..a368d08e7d19 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1212,10 +1212,8 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1212 velocity_free_td_ring_entry(vptr, j, i); 1212 velocity_free_td_ring_entry(vptr, j, i);
1213 1213
1214 } 1214 }
1215 if (vptr->td_infos[j]) { 1215 kfree(vptr->td_infos[j]);
1216 kfree(vptr->td_infos[j]); 1216 vptr->td_infos[j] = NULL;
1217 vptr->td_infos[j] = NULL;
1218 }
1219 } 1217 }
1220} 1218}
1221 1219
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index ae9e897c255e..e392ee8b37a1 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -400,7 +400,7 @@ static int __init cosa_init(void)
400 goto out_chrdev; 400 goto out_chrdev;
401 } 401 }
402 for (i=0; i<nr_cards; i++) { 402 for (i=0; i<nr_cards; i++) {
403 class_device_create(cosa_class, MKDEV(cosa_major, i), 403 class_device_create(cosa_class, NULL, MKDEV(cosa_major, i),
404 NULL, "cosa%d", i); 404 NULL, "cosa%d", i);
405 err = devfs_mk_cdev(MKDEV(cosa_major, i), 405 err = devfs_mk_cdev(MKDEV(cosa_major, i),
406 S_IFCHR|S_IRUSR|S_IWUSR, 406 S_IFCHR|S_IRUSR|S_IWUSR,
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index cb429e783749..849ac88bcccc 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -35,6 +35,7 @@
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/bitops.h> 37#include <linux/bitops.h>
38#include <linux/scatterlist.h>
38#include <asm/io.h> 39#include <asm/io.h>
39#include <asm/system.h> 40#include <asm/system.h>
40 41
@@ -1590,11 +1591,9 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
1590 aes_counter[12] = (u8)(counter >> 24); 1591 aes_counter[12] = (u8)(counter >> 24);
1591 counter++; 1592 counter++;
1592 memcpy (plain, aes_counter, 16); 1593 memcpy (plain, aes_counter, 16);
1593 sg[0].page = virt_to_page(plain); 1594 sg_set_buf(sg, plain, 16);
1594 sg[0].offset = ((long) plain & ~PAGE_MASK);
1595 sg[0].length = 16;
1596 crypto_cipher_encrypt(tfm, sg, sg, 16); 1595 crypto_cipher_encrypt(tfm, sg, sg, 16);
1597 cipher = kmap(sg[0].page) + sg[0].offset; 1596 cipher = kmap(sg->page) + sg->offset;
1598 for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) { 1597 for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) {
1599 context->coeff[i++] = ntohl(*(u32 *)&cipher[j]); 1598 context->coeff[i++] = ntohl(*(u32 *)&cipher[j]);
1600 j += 4; 1599 j += 4;
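The scatterlist change above swaps three open-coded field assignments for the sg_set_buf() helper, which at this point in kernel history did essentially the same thing (sketch of the era's definition from <linux/scatterlist.h>):

    static inline void sg_set_buf(struct scatterlist *sg, void *buf,
                                  unsigned int buflen)
    {
            sg->page   = virt_to_page(buf);
            sg->offset = offset_in_page(buf); /* (unsigned long)buf & ~PAGE_MASK */
            sg->length = buflen;
    }

Using the helper keeps airo.c correct if the scatterlist layout ever changes, which is exactly what later kernels did.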
@@ -2041,7 +2040,7 @@ static int mpi_send_packet (struct net_device *dev)
2041 return 1; 2040 return 1;
2042} 2041}
2043 2042
2044static void get_tx_error(struct airo_info *ai, u32 fid) 2043static void get_tx_error(struct airo_info *ai, s32 fid)
2045{ 2044{
2046 u16 status; 2045 u16 status;
2047 2046
@@ -2381,14 +2380,10 @@ void stop_airo_card( struct net_device *dev, int freeres )
2381 dev_kfree_skb(skb); 2380 dev_kfree_skb(skb);
2382 } 2381 }
2383 2382
2384 if (ai->flash) 2383 kfree(ai->flash);
2385 kfree(ai->flash); 2384 kfree(ai->rssi);
2386 if (ai->rssi) 2385 kfree(ai->APList);
2387 kfree(ai->rssi); 2386 kfree(ai->SSID);
2388 if (ai->APList)
2389 kfree(ai->APList);
2390 if (ai->SSID)
2391 kfree(ai->SSID);
2392 if (freeres) { 2387 if (freeres) {
2393 /* PCMCIA frees this stuff, so only for PCI and ISA */ 2388 /* PCMCIA frees this stuff, so only for PCI and ISA */
2394 release_region( dev->base_addr, 64 ); 2389 release_region( dev->base_addr, 64 );
@@ -3626,10 +3621,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3626 int rc; 3621 int rc;
3627 3622
3628 memset( &mySsid, 0, sizeof( mySsid ) ); 3623 memset( &mySsid, 0, sizeof( mySsid ) );
3629 if (ai->flash) { 3624 kfree (ai->flash);
3630 kfree (ai->flash); 3625 ai->flash = NULL;
3631 ai->flash = NULL;
3632 }
3633 3626
3634 /* The NOP is the first step in getting the card going */ 3627 /* The NOP is the first step in getting the card going */
3635 cmd.cmd = NOP; 3628 cmd.cmd = NOP;
@@ -3666,14 +3659,10 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3666 tdsRssiRid rssi_rid; 3659 tdsRssiRid rssi_rid;
3667 CapabilityRid cap_rid; 3660 CapabilityRid cap_rid;
3668 3661
3669 if (ai->APList) { 3662 kfree(ai->APList);
3670 kfree(ai->APList); 3663 ai->APList = NULL;
3671 ai->APList = NULL; 3664 kfree(ai->SSID);
3672 } 3665 ai->SSID = NULL;
3673 if (ai->SSID) {
3674 kfree(ai->SSID);
3675 ai->SSID = NULL;
3676 }
3677 // general configuration (read/modify/write) 3666 // general configuration (read/modify/write)
3678 status = readConfigRid(ai, lock); 3667 status = readConfigRid(ai, lock);
3679 if ( status != SUCCESS ) return ERROR; 3668 if ( status != SUCCESS ) return ERROR;
@@ -3687,10 +3676,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3687 memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */ 3676 memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
3688 } 3677 }
3689 else { 3678 else {
3690 if (ai->rssi) { 3679 kfree(ai->rssi);
3691 kfree(ai->rssi); 3680 ai->rssi = NULL;
3692 ai->rssi = NULL;
3693 }
3694 if (cap_rid.softCap & 8) 3681 if (cap_rid.softCap & 8)
3695 ai->config.rmode |= RXMODE_NORMALIZED_RSSI; 3682 ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
3696 else 3683 else
@@ -5369,11 +5356,13 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
5369 5356
5370static int proc_close( struct inode *inode, struct file *file ) 5357static int proc_close( struct inode *inode, struct file *file )
5371{ 5358{
5372 struct proc_data *data = (struct proc_data *)file->private_data; 5359 struct proc_data *data = file->private_data;
5373 if ( data->on_close != NULL ) data->on_close( inode, file ); 5360
5374 if ( data->rbuffer ) kfree( data->rbuffer ); 5361 if (data->on_close != NULL)
5375 if ( data->wbuffer ) kfree( data->wbuffer ); 5362 data->on_close(inode, file);
5376 kfree( data ); 5363 kfree(data->rbuffer);
5364 kfree(data->wbuffer);
5365 kfree(data);
5377 return 0; 5366 return 0;
5378} 5367}
5379 5368
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index bf25584d68d3..784de9109113 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -258,9 +258,7 @@ static void airo_detach(dev_link_t *link)
258 258
259 /* Unlink device structure, free pieces */ 259 /* Unlink device structure, free pieces */
260 *linkp = link->next; 260 *linkp = link->next;
261 if (link->priv) { 261 kfree(link->priv);
262 kfree(link->priv);
263 }
264 kfree(link); 262 kfree(link);
265 263
266} /* airo_detach */ 264} /* airo_detach */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index d57011028b72..1fbe027d26b6 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1653,8 +1653,7 @@ void stop_atmel_card(struct net_device *dev, int freeres)
1653 unregister_netdev(dev); 1653 unregister_netdev(dev);
1654 remove_proc_entry("driver/atmel", NULL); 1654 remove_proc_entry("driver/atmel", NULL);
1655 free_irq(dev->irq, dev); 1655 free_irq(dev->irq, dev);
1656 if (priv->firmware) 1656 kfree(priv->firmware);
1657 kfree(priv->firmware);
1658 if (freeres) { 1657 if (freeres) {
1659 /* PCMCIA frees this stuff, so only for PCI */ 1658 /* PCMCIA frees this stuff, so only for PCI */
1660 release_region(dev->base_addr, 64); 1659 release_region(dev->base_addr, 64);
@@ -2450,8 +2449,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2450 break; 2449 break;
2451 } 2450 }
2452 2451
2453 if (priv->firmware) 2452 kfree(priv->firmware);
2454 kfree(priv->firmware);
2455 2453
2456 priv->firmware = new_firmware; 2454 priv->firmware = new_firmware;
2457 priv->firmware_length = com.len; 2455 priv->firmware_length = com.len;
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index ff031a3985b3..195cb36619e8 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -259,8 +259,7 @@ static void atmel_detach(dev_link_t *link)
259 259
260 /* Unlink device structure, free pieces */ 260 /* Unlink device structure, free pieces */
261 *linkp = link->next; 261 *linkp = link->next;
262 if (link->priv) 262 kfree(link->priv);
263 kfree(link->priv);
264 kfree(link); 263 kfree(link);
265} 264}
266 265
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index eba0d9d2b7c5..579480dad374 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -444,6 +444,43 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
444 return err; 444 return err;
445} 445}
446 446
447/* Write a block of data to the chip's buffer with padding if
448 * necessary, via the BAP. Synchronization/serialization is the
449 * caller's problem. len must be even.
450 *
451 * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
452 */
453int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf, unsigned data_len, unsigned len,
454 u16 id, u16 offset)
455{
456 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
457 int err = 0;
458
459 if (len < 0 || len % 2 || data_len > len)
460 return -EINVAL;
461
462 err = hermes_bap_seek(hw, bap, id, offset);
463 if (err)
464 goto out;
465
466 /* Transfer all the complete words of data */
467 hermes_write_words(hw, dreg, buf, data_len/2);
468 /* If there is an odd byte left over pad and transfer it */
469 if (data_len & 1) {
470 u8 end[2];
471 end[1] = 0;
472 end[0] = ((unsigned char *)buf)[data_len - 1];
473 hermes_write_words(hw, dreg, end, 1);
474 data_len ++;
475 }
476 /* Now send zeros for the padding */
477 if (data_len < len)
478 hermes_clear_words(hw, dreg, (len - data_len) / 2);
479 /* Complete */
480 out:
481 return err;
482}
483
447/* Read a Length-Type-Value record from the card. 484/* Read a Length-Type-Value record from the card.
448 * 485 *
449 * If length is NULL, we ignore the length read from the card, and 486 * If length is NULL, we ignore the length read from the card, and
@@ -531,6 +568,7 @@ EXPORT_SYMBOL(hermes_allocate);
531 568
532EXPORT_SYMBOL(hermes_bap_pread); 569EXPORT_SYMBOL(hermes_bap_pread);
533EXPORT_SYMBOL(hermes_bap_pwrite); 570EXPORT_SYMBOL(hermes_bap_pwrite);
571EXPORT_SYMBOL(hermes_bap_pwrite_pad);
534EXPORT_SYMBOL(hermes_read_ltv); 572EXPORT_SYMBOL(hermes_read_ltv);
535EXPORT_SYMBOL(hermes_write_ltv); 573EXPORT_SYMBOL(hermes_write_ltv);
536 574
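Two details of the new hermes_bap_pwrite_pad() are worth spelling out. First, since len is unsigned, the len < 0 arm of the sanity check can never fire; the len % 2 and data_len > len tests do the real validation. Second, the padding arithmetic, as a worked example with data_len = 17 and len = 46 (ETH_ZLEN - ETH_HLEN, the 802.11 case in the orinoco hunk below):

    /* hermes_write_words(hw, dreg, buf, 17 / 2)  -> 8 words, bytes 0..15
     * odd byte 16 is paired with a zero pad byte -> 1 word, data_len = 18
     * hermes_clear_words(hw, dreg, (46 - 18) / 2) -> 14 zero words
     * total transferred: 16 + 2 + 28 = 46 bytes, as required. */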
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index ad28e3294360..a6bd472d75d4 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -376,6 +376,8 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
376 u16 id, u16 offset); 376 u16 id, u16 offset);
377int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len, 377int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
378 u16 id, u16 offset); 378 u16 id, u16 offset);
379int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf,
380 unsigned data_len, unsigned len, u16 id, u16 offset);
379int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen, 381int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
380 u16 *length, void *buf); 382 u16 *length, void *buf);
381int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, 383int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 53f5246c40aa..2617d70bcda9 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -552,7 +552,6 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
552 552
553 kfree(addr); 553 kfree(addr);
554 kfree(qual); 554 kfree(qual);
555
556 return 0; 555 return 0;
557} 556}
558 557
@@ -3081,9 +3080,7 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
3081 ret = local->func->download(local, param); 3080 ret = local->func->download(local, param);
3082 3081
3083 out: 3082 out:
3084 if (param != NULL) 3083 kfree(param);
3085 kfree(param);
3086
3087 return ret; 3084 return ret;
3088} 3085}
3089#endif /* PRISM2_DOWNLOAD_SUPPORT */ 3086#endif /* PRISM2_DOWNLOAD_SUPPORT */
@@ -3890,9 +3887,7 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
3890 } 3887 }
3891 3888
3892 out: 3889 out:
3893 if (param != NULL) 3890 kfree(param);
3894 kfree(param);
3895
3896 return ret; 3891 return ret;
3897} 3892}
3898 3893
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index de4e6c23e4b8..3db0c32afe82 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4030,6 +4030,10 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4030 int i; 4030 int i;
4031 4031
4032 rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL); 4032 rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL);
4033 if (unlikely(!rxq)) {
4034 IPW_ERROR("memory allocation failed\n");
4035 return NULL;
4036 }
4033 memset(rxq, 0, sizeof(*rxq)); 4037 memset(rxq, 0, sizeof(*rxq));
4034 spin_lock_init(&rxq->lock); 4038 spin_lock_init(&rxq->lock);
4035 INIT_LIST_HEAD(&rxq->rx_free); 4039 INIT_LIST_HEAD(&rxq->rx_free);
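The ipw2200 hunk adds the missing failure check: kmalloc(..., GFP_KERNEL) can return NULL under memory pressure, and the old code would have oopsed in the following memset(). The shape of the fix:

    rxq = kmalloc(sizeof(*rxq), GFP_KERNEL);
    if (unlikely(!rxq))             /* allocation failure is the cold path */
            return NULL;
    memset(rxq, 0, sizeof(*rxq));   /* only reached with a valid pointer */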
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d3d4ec9e242e..488ab06fb79f 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -490,7 +490,8 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
490 return 0; 490 return 0;
491 } 491 }
492 492
493 /* Check packet length, pad short packets, round up odd length */ 493 /* Length of the packet body */
494 /* FIXME: what if the skb is smaller than this? */
494 len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN); 495 len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
495 skb = skb_padto(skb, len); 496 skb = skb_padto(skb, len);
496 if (skb == NULL) 497 if (skb == NULL)
@@ -541,13 +542,21 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
541 stats->tx_errors++; 542 stats->tx_errors++;
542 goto fail; 543 goto fail;
543 } 544 }
545 /* Actual xfer length - allow for padding */
546 len = ALIGN(data_len, 2);
547 if (len < ETH_ZLEN - ETH_HLEN)
548 len = ETH_ZLEN - ETH_HLEN;
544 } else { /* IEEE 802.3 frame */ 549 } else { /* IEEE 802.3 frame */
545 data_len = len + ETH_HLEN; 550 data_len = len + ETH_HLEN;
546 data_off = HERMES_802_3_OFFSET; 551 data_off = HERMES_802_3_OFFSET;
547 p = skb->data; 552 p = skb->data;
553 /* Actual xfer length - round up for odd length packets */
554 len = ALIGN(data_len, 2);
555 if (len < ETH_ZLEN)
556 len = ETH_ZLEN;
548 } 557 }
549 558
550 err = hermes_bap_pwrite(hw, USER_BAP, p, data_len, 559 err = hermes_bap_pwrite_pad(hw, USER_BAP, p, data_len, len,
551 txfid, data_off); 560 txfid, data_off);
552 if (err) { 561 if (err) {
553 printk(KERN_ERR "%s: Error %d writing packet to BAP\n", 562 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 6c9584a9f284..78bdb359835e 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -754,8 +754,7 @@ islpci_free_memory(islpci_private *priv)
754 pci_unmap_single(priv->pdev, buf->pci_addr, 754 pci_unmap_single(priv->pdev, buf->pci_addr,
755 buf->size, PCI_DMA_FROMDEVICE); 755 buf->size, PCI_DMA_FROMDEVICE);
756 buf->pci_addr = 0; 756 buf->pci_addr = 0;
757 if (buf->mem) 757 kfree(buf->mem);
758 kfree(buf->mem);
759 buf->size = 0; 758 buf->size = 0;
760 buf->mem = NULL; 759 buf->mem = NULL;
761 } 760 }
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 5952e9960499..3b49efa37ee5 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -97,12 +97,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
97 /* lock the driver code */ 97 /* lock the driver code */
98 spin_lock_irqsave(&priv->slock, flags); 98 spin_lock_irqsave(&priv->slock, flags);
99 99
100 /* determine the amount of fragments needed to store the frame */
101
102 frame_size = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
103 if (init_wds)
104 frame_size += 6;
105
106 /* check whether the destination queue has enough fragments for the frame */ 100 /* check whether the destination queue has enough fragments for the frame */
107 curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]); 101 curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
108 if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) { 102 if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
@@ -213,6 +207,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
213 /* store the skb address for future freeing */ 207 /* store the skb address for future freeing */
214 priv->data_low_tx[index] = skb; 208 priv->data_low_tx[index] = skb;
215 /* set the proper fragment start address and size information */ 209 /* set the proper fragment start address and size information */
210 frame_size = skb->len;
216 fragment->size = cpu_to_le16(frame_size); 211 fragment->size = cpu_to_le16(frame_size);
217 fragment->flags = cpu_to_le16(0); /* set to 1 if more fragments */ 212 fragment->flags = cpu_to_le16(0); /* set to 1 if more fragments */
218 fragment->address = cpu_to_le32(pci_map_address); 213 fragment->address = cpu_to_le32(pci_map_address);
@@ -246,12 +241,10 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
246 return 0; 241 return 0;
247 242
248 drop_free: 243 drop_free:
249 /* free the skbuf structure before aborting */
250 dev_kfree_skb(skb);
251 skb = NULL;
252
253 priv->statistics.tx_dropped++; 244 priv->statistics.tx_dropped++;
254 spin_unlock_irqrestore(&priv->slock, flags); 245 spin_unlock_irqrestore(&priv->slock, flags);
246 dev_kfree_skb(skb);
247 skb = NULL;
255 return err; 248 return err;
256} 249}
257 250
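Besides dropping the redundant padding computation (frame_size is now taken from skb->len at the point of use), the islpci_eth_transmit() hunk moves dev_kfree_skb() on the drop path out of the spin_lock_irqsave() region: freeing an skb can run non-trivial destructor work, so doing it inside the irq-disabled critical section needlessly lengthens the lock hold time. The resulting shape:

    spin_lock_irqsave(&priv->slock, flags);
    /* ... queue bookkeeping only ... */
    priv->statistics.tx_dropped++;
    spin_unlock_irqrestore(&priv->slock, flags);
    dev_kfree_skb(skb);     /* free with interrupts enabled again */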
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index 4937a5ad4b2c..6a60c5970cb5 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -137,7 +137,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
137 PCI_DMA_FROMDEVICE); 137 PCI_DMA_FROMDEVICE);
138 if (!buf->pci_addr) { 138 if (!buf->pci_addr) {
139 printk(KERN_WARNING 139 printk(KERN_WARNING
140 "Failed to make memory DMA'able\n."); 140 "Failed to make memory DMA'able.\n");
141 return -ENOMEM; 141 return -ENOMEM;
142 } 142 }
143 } 143 }
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 12123e24b113..eea2f04c8c6d 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -268,11 +268,10 @@ mgt_clean(islpci_private *priv)
268 268
269 if (!priv->mib) 269 if (!priv->mib)
270 return; 270 return;
271 for (i = 0; i < OID_NUM_LAST; i++) 271 for (i = 0; i < OID_NUM_LAST; i++) {
272 if (priv->mib[i]) { 272 kfree(priv->mib[i]);
273 kfree(priv->mib[i]); 273 priv->mib[i] = NULL;
274 priv->mib[i] = NULL; 274 }
275 }
276 kfree(priv->mib); 275 kfree(priv->mib);
277 priv->mib = NULL; 276 priv->mib = NULL;
278} 277}
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 7bc7fc823128..d25264ba0c0e 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -860,12 +860,9 @@ static int allocate_buffers(struct strip *strip_info, int mtu)
860 strip_info->mtu = dev->mtu = mtu; 860 strip_info->mtu = dev->mtu = mtu;
861 return (1); 861 return (1);
862 } 862 }
863 if (r) 863 kfree(r);
864 kfree(r); 864 kfree(s);
865 if (s) 865 kfree(t);
866 kfree(s);
867 if (t)
868 kfree(t);
869 return (0); 866 return (0);
870} 867}
871 868
@@ -922,13 +919,9 @@ static int strip_change_mtu(struct net_device *dev, int new_mtu)
922 printk(KERN_NOTICE "%s: strip MTU changed fom %d to %d.\n", 919 printk(KERN_NOTICE "%s: strip MTU changed fom %d to %d.\n",
923 strip_info->dev->name, old_mtu, strip_info->mtu); 920 strip_info->dev->name, old_mtu, strip_info->mtu);
924 921
925 if (orbuff) 922 kfree(orbuff);
926 kfree(orbuff); 923 kfree(osbuff);
927 if (osbuff) 924 kfree(otbuff);
928 kfree(osbuff);
929 if (otbuff)
930 kfree(otbuff);
931
932 return 0; 925 return 0;
933} 926}
934 927
@@ -2498,18 +2491,13 @@ static int strip_close_low(struct net_device *dev)
2498 /* 2491 /*
2499 * Free all STRIP frame buffers. 2492 * Free all STRIP frame buffers.
2500 */ 2493 */
2501 if (strip_info->rx_buff) { 2494 kfree(strip_info->rx_buff);
2502 kfree(strip_info->rx_buff); 2495 strip_info->rx_buff = NULL;
2503 strip_info->rx_buff = NULL; 2496 kfree(strip_info->sx_buff);
2504 } 2497 strip_info->sx_buff = NULL;
2505 if (strip_info->sx_buff) { 2498 kfree(strip_info->tx_buff);
2506 kfree(strip_info->sx_buff); 2499 strip_info->tx_buff = NULL;
2507 strip_info->sx_buff = NULL; 2500
2508 }
2509 if (strip_info->tx_buff) {
2510 kfree(strip_info->tx_buff);
2511 strip_info->tx_buff = NULL;
2512 }
2513 del_timer(&strip_info->idle_timer); 2501 del_timer(&strip_info->idle_timer);
2514 return 0; 2502 return 0;
2515} 2503}