 MAINTAINERS                                |    8
 drivers/net/Kconfig                        |   77
 drivers/net/Makefile                       |    3
 drivers/net/acenic.c                       |    6
 drivers/net/amd8111e.c                     |    0  (mode 100755 => 100644)
 drivers/net/amd8111e.h                     |    0  (mode 100755 => 100644)
 drivers/net/au1000_eth.c                   |    6
 drivers/net/b44.c                          |   28
 drivers/net/bmac.c                         |    6
 drivers/net/bnx2.c                         |   12
 drivers/net/e1000/e1000_ethtool.c          |    7
 drivers/net/e1000/e1000_main.c             |    9
 drivers/net/eepro.c                        |    7
 drivers/net/fs_enet/Kconfig                |   20
 drivers/net/fs_enet/Makefile               |   10
 drivers/net/fs_enet/fs_enet-main.c         | 1226
 drivers/net/fs_enet/fs_enet-mii.c          |  507
 drivers/net/fs_enet/fs_enet.h              |  245
 drivers/net/fs_enet/mac-fcc.c              |  578
 drivers/net/fs_enet/mac-fec.c              |  653
 drivers/net/fs_enet/mac-scc.c              |  524
 drivers/net/fs_enet/mii-bitbang.c          |  405
 drivers/net/fs_enet/mii-fixed.c            |   92
 drivers/net/hamradio/mkiss.c               |    6
 drivers/net/ibm_emac/Makefile              |   13
 drivers/net/ibm_emac/ibm_emac.h            |  408
 drivers/net/ibm_emac/ibm_emac_core.c       | 3396
 drivers/net/ibm_emac/ibm_emac_core.h       |  313
 drivers/net/ibm_emac/ibm_emac_debug.c      |  363
 drivers/net/ibm_emac/ibm_emac_debug.h      |   63
 drivers/net/ibm_emac/ibm_emac_mal.c        |  674
 drivers/net/ibm_emac/ibm_emac_mal.h        |  332
 drivers/net/ibm_emac/ibm_emac_phy.c        |  335
 drivers/net/ibm_emac/ibm_emac_phy.h        |  105
 drivers/net/ibm_emac/ibm_emac_rgmii.c      |  201
 drivers/net/ibm_emac/ibm_emac_rgmii.h      |   60
 drivers/net/ibm_emac/ibm_emac_tah.c        |  111
 drivers/net/ibm_emac/ibm_emac_tah.h        |   96
 drivers/net/ibm_emac/ibm_emac_zmii.c       |  255
 drivers/net/ibm_emac/ibm_emac_zmii.h       |  104
 drivers/net/ibmveth.c                      |  186
 drivers/net/ibmveth.h                      |   23
 drivers/net/irda/donauboe.c                |    6
 drivers/net/irda/irda-usb.c                |    6
 drivers/net/irda/irport.c                  |    3
 drivers/net/irda/sir_dev.c                 |    3
 drivers/net/irda/vlsi_ir.c                 |    3
 drivers/net/mace.c                         |    6
 drivers/net/ne2k-pci.c                     |    1
 drivers/net/ni65.c                         |    9
 drivers/net/pcmcia/pcnet_cs.c              |    6
 drivers/net/rrunner.c                      |    6
 drivers/net/s2io.c                         |    3
 drivers/net/saa9730.c                      |    8
 drivers/net/sis190.c                       |    2
 drivers/net/sis900.c                       |   16
 drivers/net/smc91x.c                       |    4
 drivers/net/starfire.c                     |    4
 drivers/net/sundance.c                     |   62
 drivers/net/tg3.c                          |   91
 drivers/net/tg3.h                          |   12
 drivers/net/tulip/de2104x.c                |    6
 drivers/net/tulip/tulip_core.c             |    6
 drivers/net/via-velocity.c                 |    6
 drivers/net/wireless/airo.c                |   48
 drivers/net/wireless/airo_cs.c             |    4
 drivers/net/wireless/atmel.c               |    6
 drivers/net/wireless/atmel_cs.c            |    3
 drivers/net/wireless/hermes.c              |   38
 drivers/net/wireless/hermes.h              |    2
 drivers/net/wireless/hostap/hostap_ioctl.c |    9
 drivers/net/wireless/ipw2200.c             |    4
 drivers/net/wireless/orinoco.c             |   13
 drivers/net/wireless/prism54/islpci_dev.c  |    3
 drivers/net/wireless/prism54/islpci_eth.c  |   13
 drivers/net/wireless/prism54/oid_mgt.c     |    9
 drivers/net/wireless/strip.c               |   38
 include/linux/fs_enet_pd.h                 |  136
 include/linux/pci_ids.h                    |    2
 include/net/ax25.h                         |    3
 include/net/netrom.h                       |    3
 81 files changed, 8921 insertions(+), 3155 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 770155a01523..251a28e2d4cc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1945,6 +1945,14 @@ M: george@mvista.com
 L:	netdev@vger.kernel.org
 S:	Supported
 
+POWERPC 4xx EMAC DRIVER
+P:	Eugene Surovegin
+M:	ebs@ebshome.net
+W:	http://kernel.ebshome.net/emac/
+L:	linuxppc-embedded@ozlabs.org
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 PNP SUPPORT
 P:	Adam Belay
 M:	ambx1@neo.rr.com
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fee8c5cf1f3a..6d4f9ceb0a32 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1163,38 +1163,74 @@ config IBMVETH
 	  be called ibmveth.
 
 config IBM_EMAC
-	bool "IBM PPC4xx EMAC driver support"
+	tristate "PowerPC 4xx on-chip Ethernet support"
 	depends on 4xx
-	select CRC32
-	---help---
-	  This driver supports the IBM PPC4xx EMAC family of on-chip
+	help
+	  This driver supports the PowerPC 4xx EMAC family of on-chip
 	  Ethernet controllers.
 
-config IBM_EMAC_ERRMSG
-	bool "Verbose error messages"
-	depends on IBM_EMAC && BROKEN
-
 config IBM_EMAC_RXB
 	int "Number of receive buffers"
 	depends on IBM_EMAC
-	default "128" if IBM_EMAC4
-	default "64"
+	default "128"
 
 config IBM_EMAC_TXB
 	int "Number of transmit buffers"
 	depends on IBM_EMAC
-	default "128" if IBM_EMAC4
-	default "8"
+	default "64"
+
+config IBM_EMAC_POLL_WEIGHT
+	int "MAL NAPI polling weight"
+	depends on IBM_EMAC
+	default "32"
 
-config IBM_EMAC_FGAP
-	int "Frame gap"
+config IBM_EMAC_RX_COPY_THRESHOLD
+	int "RX skb copy threshold (bytes)"
 	depends on IBM_EMAC
-	default "8"
+	default "256"
 
-config IBM_EMAC_SKBRES
-	int "Skb reserve amount"
+config IBM_EMAC_RX_SKB_HEADROOM
+	int "Additional RX skb headroom (bytes)"
 	depends on IBM_EMAC
 	default "0"
+	help
+	  Additional receive skb headroom. Note that the driver will always
+	  reserve at least 2 bytes to keep the IP header aligned, so usually
+	  there is no need to add any additional headroom.
+
+	  If unsure, set to 0.
+
+config IBM_EMAC_PHY_RX_CLK_FIX
+	bool "PHY Rx clock workaround"
+	depends on IBM_EMAC && (405EP || 440GX || 440EP)
+	help
+	  Enable this if the EMAC is attached to a PHY which does not
+	  generate an RX clock while there is no link; in that case you
+	  will see "TX disable timeout" or "RX disable timeout" messages
+	  in the system log.
+
+	  If unsure, say N.
+
+config IBM_EMAC_DEBUG
+	bool "Debugging"
+	depends on IBM_EMAC
+	default n
+
+config IBM_EMAC_ZMII
+	bool
+	depends on IBM_EMAC && (NP405H || NP405L || 44x)
+	default y
+
+config IBM_EMAC_RGMII
+	bool
+	depends on IBM_EMAC && 440GX
+	default y
+
+config IBM_EMAC_TAH
+	bool
+	depends on IBM_EMAC && 440GX
+	default y
 
 config NET_PCI
 	bool "EISA, VLB, PCI and on board controllers"
@@ -1775,6 +1811,7 @@ config NE_H8300
 	  controller on the Renesas H8/300 processor.
 
 source "drivers/net/fec_8xx/Kconfig"
+source "drivers/net/fs_enet/Kconfig"
 
 endmenu
 
@@ -2201,8 +2238,8 @@ config S2IO
 	depends on PCI
 	---help---
 	  This driver supports the 10Gbe XFrame NIC of S2IO.
-	  For help regarding driver compilation, installation and
-	  tuning please look into ~/drivers/net/s2io/README.txt.
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/s2io.txt>.
 
 config S2IO_NAPI
 	bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1a84e0435f64..7c313cb341b8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -203,3 +203,6 @@ obj-$(CONFIG_IRDA) += irda/
 obj-$(CONFIG_ETRAX_ETHERNET) += cris/
 
 obj-$(CONFIG_NETCONSOLE) += netconsole.o
+
+obj-$(CONFIG_FS_ENET) += fs_enet/
+
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index dbecc6bf7851..b8953de5664a 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -871,10 +871,8 @@ static void ace_init_cleanup(struct net_device *dev)
 	if (ap->info)
 		pci_free_consistent(ap->pdev, sizeof(struct ace_info),
 				    ap->info, ap->info_dma);
-	if (ap->skb)
-		kfree(ap->skb);
-	if (ap->trace_buf)
-		kfree(ap->trace_buf);
+	kfree(ap->skb);
+	kfree(ap->trace_buf);
 
 	if (dev->irq)
 		free_irq(dev->irq, dev);
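
Note: the same simplification recurs in several hunks below (au1000, b44, bmac, bnx2, e1000): kfree() is defined to be a no-op for a NULL pointer, so the guarding "if" adds nothing. A minimal sketch of the pattern, with "struct foo" as a hypothetical stand-in for the various driver structures:

	#include <linux/slab.h>

	struct foo {
		void *buf;
	};

	static void foo_cleanup(struct foo *f)
	{
		kfree(f->buf);	/* safe even when f->buf == NULL */
		f->buf = NULL;	/* avoid a dangling pointer on reuse */
	}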
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index d9ba8be72af8..d9ba8be72af8 100755..100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index cfe3a4298822..cfe3a4298822 100755..100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 78506911d656..332e9953c55c 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1606,8 +1606,7 @@ err_out:
 /* here we should have a valid dev plus aup-> register addresses
  * so we can reset the mac properly.*/
 	reset_mac(dev);
-	if (aup->mii)
-		kfree(aup->mii);
+	kfree(aup->mii);
 	for (i = 0; i < NUM_RX_DMA; i++) {
 		if (aup->rx_db_inuse[i])
 			ReleaseDB(aup, aup->rx_db_inuse[i]);
@@ -1806,8 +1805,7 @@ static void __exit au1000_cleanup_module(void)
 	if (dev) {
 		aup = (struct au1000_private *) dev->priv;
 		unregister_netdev(dev);
-		if (aup->mii)
-			kfree(aup->mii);
+		kfree(aup->mii);
 		for (j = 0; j < NUM_RX_DMA; j++) {
 			if (aup->rx_db_inuse[j])
 				ReleaseDB(aup, aup->rx_db_inuse[j]);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 282ebd15f011..0ee3e27969c6 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -19,6 +19,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/version.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/uaccess.h>
 #include <asm/io.h>
@@ -1130,14 +1131,10 @@ static void b44_init_rings(struct b44 *bp)
  */
 static void b44_free_consistent(struct b44 *bp)
 {
-	if (bp->rx_buffers) {
-		kfree(bp->rx_buffers);
-		bp->rx_buffers = NULL;
-	}
-	if (bp->tx_buffers) {
-		kfree(bp->tx_buffers);
-		bp->tx_buffers = NULL;
-	}
+	kfree(bp->rx_buffers);
+	bp->rx_buffers = NULL;
+	kfree(bp->tx_buffers);
+	bp->tx_buffers = NULL;
 	if (bp->rx_ring) {
 		if (bp->flags & B44_FLAG_RX_RING_HACK) {
 			dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
@@ -1619,14 +1616,14 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 	cmd->advertising = 0;
 	if (bp->flags & B44_FLAG_ADV_10HALF)
-		cmd->advertising |= ADVERTISE_10HALF;
+		cmd->advertising |= ADVERTISED_10baseT_Half;
 	if (bp->flags & B44_FLAG_ADV_10FULL)
-		cmd->advertising |= ADVERTISE_10FULL;
+		cmd->advertising |= ADVERTISED_10baseT_Full;
 	if (bp->flags & B44_FLAG_ADV_100HALF)
-		cmd->advertising |= ADVERTISE_100HALF;
+		cmd->advertising |= ADVERTISED_100baseT_Half;
 	if (bp->flags & B44_FLAG_ADV_100FULL)
-		cmd->advertising |= ADVERTISE_100FULL;
-	cmd->advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+		cmd->advertising |= ADVERTISED_100baseT_Full;
+	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
 	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
 		SPEED_100 : SPEED_10;
 	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
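
Note on the hunk above: the ADVERTISE_* constants from <linux/mii.h> are bit positions in the MII advertisement register, while ethtool's cmd->advertising field expects the ADVERTISED_* mask from <linux/ethtool.h>; the two encodings are not interchangeable. A sketch of the translation the fix relies on, as a hypothetical helper:

	#include <linux/mii.h>
	#include <linux/ethtool.h>

	static u32 mii_adv_to_ethtool(u32 adv)
	{
		u32 mask = 0;

		if (adv & ADVERTISE_10HALF)
			mask |= ADVERTISED_10baseT_Half;
		if (adv & ADVERTISE_10FULL)
			mask |= ADVERTISED_10baseT_Full;
		if (adv & ADVERTISE_100HALF)
			mask |= ADVERTISED_100baseT_Half;
		if (adv & ADVERTISE_100FULL)
			mask |= ADVERTISED_100baseT_Full;
		return mask;
	}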
@@ -2044,6 +2041,8 @@ static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
 	b44_free_rings(bp);
 
 	spin_unlock_irq(&bp->lock);
+
+	free_irq(dev->irq, dev);
 	pci_disable_device(pdev);
 	return 0;
 }
@@ -2060,6 +2059,9 @@ static int b44_resume(struct pci_dev *pdev)
 	if (!netif_running(dev))
 		return 0;
 
+	if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
+		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
+
 	spin_lock_irq(&bp->lock);
 
 	b44_init_rings(bp);
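
Note: the two b44 hunks above pair up — b44_suspend() now releases the interrupt line while the device sleeps and b44_resume() re-requests it before touching the rings. SA_SHIRQ is the 2.6-era shared-interrupt flag (renamed IRQF_SHARED in later kernels).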
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 60dba4a1ca5c..73f2fcfc557f 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1689,10 +1689,8 @@ static void __exit bmac_exit(void)
 {
 	macio_unregister_driver(&bmac_driver);
 
-	if (bmac_emergency_rxbuf != NULL) {
-		kfree(bmac_emergency_rxbuf);
-		bmac_emergency_rxbuf = NULL;
-	}
+	kfree(bmac_emergency_rxbuf);
+	bmac_emergency_rxbuf = NULL;
 }
 
 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 3a2ace01e444..11d252318221 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -314,20 +314,16 @@ bnx2_free_mem(struct bnx2 *bp)
 				    bp->tx_desc_ring, bp->tx_desc_mapping);
 		bp->tx_desc_ring = NULL;
 	}
-	if (bp->tx_buf_ring) {
-		kfree(bp->tx_buf_ring);
-		bp->tx_buf_ring = NULL;
-	}
+	kfree(bp->tx_buf_ring);
+	bp->tx_buf_ring = NULL;
 	if (bp->rx_desc_ring) {
 		pci_free_consistent(bp->pdev,
 				    sizeof(struct rx_bd) * RX_DESC_CNT,
 				    bp->rx_desc_ring, bp->rx_desc_mapping);
 		bp->rx_desc_ring = NULL;
 	}
-	if (bp->rx_buf_ring) {
-		kfree(bp->rx_buf_ring);
-		bp->rx_buf_ring = NULL;
-	}
+	kfree(bp->rx_buf_ring);
+	bp->rx_buf_ring = NULL;
 }
 
 static int
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 6b9acc7f94a3..9c7feaeaa6a4 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -965,11 +965,8 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
 	if(rxdr->desc)
 		pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
 
-	if(txdr->buffer_info)
-		kfree(txdr->buffer_info);
-	if(rxdr->buffer_info)
-		kfree(rxdr->buffer_info);
-
+	kfree(txdr->buffer_info);
+	kfree(rxdr->buffer_info);
 	return;
 }
 
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 6b72f6acdd54..efbbda7cbcbf 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -191,8 +191,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 #ifdef CONFIG_PM
+static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
 static int e1000_resume(struct pci_dev *pdev);
 #endif
 
@@ -1149,7 +1149,8 @@ e1000_setup_tx_resources(struct e1000_adapter *adapter,
 	int size;
 
 	size = sizeof(struct e1000_buffer) * txdr->count;
-	txdr->buffer_info = vmalloc(size);
+
+	txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
 	if(!txdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
 		"Unable to allocate memory for the transmit descriptor ring\n");
@@ -1366,7 +1367,7 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
 	int size, desc_len;
 
 	size = sizeof(struct e1000_buffer) * rxdr->count;
-	rxdr->buffer_info = vmalloc(size);
+	rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
 	if (!rxdr->buffer_info) {
 		DPRINTK(PROBE, ERR,
 		"Unable to allocate memory for the receive descriptor ring\n");
@@ -4193,6 +4194,7 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
 	return 0;
 }
 
+#ifdef CONFIG_PM
 static int
 e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 {
@@ -4289,7 +4291,6 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 	return 0;
 }
 
-#ifdef CONFIG_PM
 static int
 e1000_resume(struct pci_dev *pdev)
 {
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index dcb3028bb60f..1ce2c675b8a7 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1797,10 +1797,9 @@ MODULE_AUTHOR("Pascal Dupuis and others");
 MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
 MODULE_LICENSE("GPL");
 
-static int num_params;
-module_param_array(io, int, &num_params, 0);
-module_param_array(irq, int, &num_params, 0);
-module_param_array(mem, int, &num_params, 0);
+module_param_array(io, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param_array(mem, int, NULL, 0);
 module_param(autodetect, int, 0);
 MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
 MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
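
Note: the dropped num_params variable was shared by all three arrays, so each module_param_array() would have overwritten the same element count; passing NULL tells the module loader not to store the count at all. If the counts were actually wanted, each array needs its own variable — a hypothetical sketch:

	static int io[4], irq[4];
	static int num_io, num_irq;	/* one counter per array */

	module_param_array(io, int, &num_io, 0);
	module_param_array(irq, int, &num_irq, 0);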
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
new file mode 100644
index 000000000000..6aaee67dd4b7
--- /dev/null
+++ b/drivers/net/fs_enet/Kconfig
@@ -0,0 +1,20 @@
+config FS_ENET
+	tristate "Freescale Ethernet Driver"
+	depends on NET_ETHERNET && (CPM1 || CPM2)
+	select MII
+
+config FS_ENET_HAS_SCC
+	bool "Chip has an SCC usable for ethernet"
+	depends on FS_ENET && (CPM1 || CPM2)
+	default y
+
+config FS_ENET_HAS_FCC
+	bool "Chip has an FCC usable for ethernet"
+	depends on FS_ENET && CPM2
+	default y
+
+config FS_ENET_HAS_FEC
+	bool "Chip has an FEC usable for ethernet"
+	depends on FS_ENET && CPM1
+	default y
+
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
new file mode 100644
index 000000000000..d6dd3f2fb43e
--- /dev/null
+++ b/drivers/net/fs_enet/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the Freescale Ethernet controllers
+#
+
+obj-$(CONFIG_FS_ENET) += fs_enet.o
+
+obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o
+obj-$(CONFIG_8260) += mac-fcc.o
+
+fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
new file mode 100644
index 000000000000..44fac7373289
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -0,0 +1,1226 @@
+/*
+ * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ *  by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
+ * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/fs.h>
+
+#include <linux/vmalloc.h>
+#include <asm/pgtable.h>
+
+#include <asm/pgtable.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#include "fs_enet.h"
+
+/*************************************************/
+
+static char version[] __devinitdata =
+	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";
+
+MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
+MODULE_DESCRIPTION("Freescale Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+MODULE_PARM(fs_enet_debug, "i");
+MODULE_PARM_DESC(fs_enet_debug,
+		 "Freescale bitmapped debugging message enable value");
+
+int fs_enet_debug = -1;	/* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
+
+static void fs_set_multicast_list(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+
+	(*fep->ops->set_multicast_list)(dev);
+}
+
+/* NAPI receive function */
+static int fs_enet_rx_napi(struct net_device *dev, int *budget)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	const struct fs_platform_info *fpi = fep->fpi;
+	cbd_t *bdp;
+	struct sk_buff *skb, *skbn, *skbt;
+	int received = 0;
+	u16 pkt_len, sc;
+	int curidx;
+	int rx_work_limit = 0;	/* pacify gcc */
+
+	rx_work_limit = min(dev->quota, *budget);
+
+	if (!netif_running(dev))
+		return 0;
+
+	/*
+	 * First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = fep->cur_rx;
+
+	/* clear RX status bits for napi */
+	(*fep->ops->napi_clear_rx_event)(dev);
+
+	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+
+		curidx = bdp - fep->rx_bd_base;
+
+		/*
+		 * Since we have allocated space to hold a complete frame,
+		 * the last indicator should be set.
+		 */
+		if ((sc & BD_ENET_RX_LAST) == 0)
+			printk(KERN_WARNING DRV_MODULE_NAME
+			       ": %s rcv is not +last\n",
+			       dev->name);
+
+		/*
+		 * Check for errors.
+		 */
+		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+			fep->stats.rx_errors++;
+			/* Frame too long or too short. */
+			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+				fep->stats.rx_length_errors++;
+			/* Frame alignment */
+			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+				fep->stats.rx_frame_errors++;
+			/* CRC Error */
+			if (sc & BD_ENET_RX_CR)
+				fep->stats.rx_crc_errors++;
+			/* FIFO overrun */
+			if (sc & BD_ENET_RX_OV)
+				fep->stats.rx_crc_errors++;
+
+			skb = fep->rx_skbuff[curidx];
+
+			dma_unmap_single(fep->dev, skb->data,
+				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+				DMA_FROM_DEVICE);
+
+			skbn = skb;
+
+		} else {
+
+			/* napi, got packet but no quota */
+			if (--rx_work_limit < 0)
+				break;
+
+			skb = fep->rx_skbuff[curidx];
+
+			dma_unmap_single(fep->dev, skb->data,
+				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+				DMA_FROM_DEVICE);
+
+			/*
+			 * Process the incoming frame.
+			 */
+			fep->stats.rx_packets++;
+			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
+			fep->stats.rx_bytes += pkt_len + 4;
+
+			if (pkt_len <= fpi->rx_copybreak) {
+				/* +2 to make IP header L1 cache aligned */
+				skbn = dev_alloc_skb(pkt_len + 2);
+				if (skbn != NULL) {
+					skb_reserve(skbn, 2);	/* align IP header */
+					memcpy(skbn->data, skb->data, pkt_len);
+					/* swap */
+					skbt = skb;
+					skb = skbn;
+					skbn = skbt;
+				}
+			} else
+				skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+
+			if (skbn != NULL) {
+				skb->dev = dev;
+				skb_put(skb, pkt_len);	/* Make room */
+				skb->protocol = eth_type_trans(skb, dev);
+				received++;
+				netif_receive_skb(skb);
+			} else {
+				printk(KERN_WARNING DRV_MODULE_NAME
+				       ": %s Memory squeeze, dropping packet.\n",
+				       dev->name);
+				fep->stats.rx_dropped++;
+				skbn = skb;
+			}
+		}
+
+		fep->rx_skbuff[curidx] = skbn;
+		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
+			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+			     DMA_FROM_DEVICE));
+		CBDW_DATLEN(bdp, 0);
+		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+		/*
+		 * Update BD pointer to next entry.
+		 */
+		if ((sc & BD_ENET_RX_WRAP) == 0)
+			bdp++;
+		else
+			bdp = fep->rx_bd_base;
+
+		(*fep->ops->rx_bd_done)(dev);
+	}
+
+	fep->cur_rx = bdp;
+
+	dev->quota -= received;
+	*budget -= received;
+
+	if (rx_work_limit < 0)
+		return 1;	/* not done */
+
+	/* done */
+	netif_rx_complete(dev);
+
+	(*fep->ops->napi_enable_rx)(dev);
+
+	return 0;
+}
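+
+/*
+ * Note on fs_enet_rx_napi() above: it follows the 2.6-era NAPI contract,
+ * where ->poll() may consume at most min(dev->quota, *budget) packets,
+ * must decrement both counters by the number actually received, and
+ * returns 1 while more work remains.  Returning 0 means the ring was
+ * drained, netif_rx_complete() was called and RX events were re-enabled.
+ */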
+
+/* non NAPI receive function */
+static int fs_enet_rx_non_napi(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	const struct fs_platform_info *fpi = fep->fpi;
+	cbd_t *bdp;
+	struct sk_buff *skb, *skbn, *skbt;
+	int received = 0;
+	u16 pkt_len, sc;
+	int curidx;
+	/*
+	 * First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = fep->cur_rx;
+
+	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
+
+		curidx = bdp - fep->rx_bd_base;
+
+		/*
+		 * Since we have allocated space to hold a complete frame,
+		 * the last indicator should be set.
+		 */
+		if ((sc & BD_ENET_RX_LAST) == 0)
+			printk(KERN_WARNING DRV_MODULE_NAME
+			       ": %s rcv is not +last\n",
+			       dev->name);
+
+		/*
+		 * Check for errors.
+		 */
+		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
+			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+			fep->stats.rx_errors++;
+			/* Frame too long or too short. */
+			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+				fep->stats.rx_length_errors++;
+			/* Frame alignment */
+			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+				fep->stats.rx_frame_errors++;
+			/* CRC Error */
+			if (sc & BD_ENET_RX_CR)
+				fep->stats.rx_crc_errors++;
+			/* FIFO overrun */
+			if (sc & BD_ENET_RX_OV)
+				fep->stats.rx_crc_errors++;
+
+			skb = fep->rx_skbuff[curidx];
+
+			dma_unmap_single(fep->dev, skb->data,
+				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+				DMA_FROM_DEVICE);
+
+			skbn = skb;
+
+		} else {
+
+			skb = fep->rx_skbuff[curidx];
+
+			dma_unmap_single(fep->dev, skb->data,
+				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+				DMA_FROM_DEVICE);
+
+			/*
+			 * Process the incoming frame.
+			 */
+			fep->stats.rx_packets++;
+			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
+			fep->stats.rx_bytes += pkt_len + 4;
+
+			if (pkt_len <= fpi->rx_copybreak) {
+				/* +2 to make IP header L1 cache aligned */
+				skbn = dev_alloc_skb(pkt_len + 2);
+				if (skbn != NULL) {
+					skb_reserve(skbn, 2);	/* align IP header */
+					memcpy(skbn->data, skb->data, pkt_len);
+					/* swap */
+					skbt = skb;
+					skb = skbn;
+					skbn = skbt;
+				}
+			} else
+				skbn = dev_alloc_skb(ENET_RX_FRSIZE);
+
+			if (skbn != NULL) {
+				skb->dev = dev;
+				skb_put(skb, pkt_len);	/* Make room */
+				skb->protocol = eth_type_trans(skb, dev);
+				received++;
+				netif_rx(skb);
+			} else {
+				printk(KERN_WARNING DRV_MODULE_NAME
+				       ": %s Memory squeeze, dropping packet.\n",
+				       dev->name);
+				fep->stats.rx_dropped++;
+				skbn = skb;
+			}
+		}
+
+		fep->rx_skbuff[curidx] = skbn;
+		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
+			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+			     DMA_FROM_DEVICE));
+		CBDW_DATLEN(bdp, 0);
+		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
+
+		/*
+		 * Update BD pointer to next entry.
+		 */
+		if ((sc & BD_ENET_RX_WRAP) == 0)
+			bdp++;
+		else
+			bdp = fep->rx_bd_base;
+
+		(*fep->ops->rx_bd_done)(dev);
+	}
+
+	fep->cur_rx = bdp;
+
+	return 0;
+}
+
+static void fs_enet_tx(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	cbd_t *bdp;
+	struct sk_buff *skb;
+	int dirtyidx, do_wake, do_restart;
+	u16 sc;
+
+	spin_lock(&fep->lock);
+	bdp = fep->dirty_tx;
+
+	do_wake = do_restart = 0;
+	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
+
+		dirtyidx = bdp - fep->tx_bd_base;
+
+		if (fep->tx_free == fep->tx_ring)
+			break;
+
+		skb = fep->tx_skbuff[dirtyidx];
+
+		/*
+		 * Check for errors.
+		 */
+		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {
+
+			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
+				fep->stats.tx_heartbeat_errors++;
+			if (sc & BD_ENET_TX_LC)	/* Late collision */
+				fep->stats.tx_window_errors++;
+			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
+				fep->stats.tx_aborted_errors++;
+			if (sc & BD_ENET_TX_UN)	/* Underrun */
+				fep->stats.tx_fifo_errors++;
+			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
+				fep->stats.tx_carrier_errors++;
+
+			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+				fep->stats.tx_errors++;
+				do_restart = 1;
+			}
+		} else
+			fep->stats.tx_packets++;
+
+		if (sc & BD_ENET_TX_READY)
+			printk(KERN_WARNING DRV_MODULE_NAME
+			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
+			       dev->name);
+
+		/*
+		 * Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (sc & BD_ENET_TX_DEF)
+			fep->stats.collisions++;
+
+		/* unmap */
+		dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
+
+		/*
+		 * Free the sk buffer associated with this last transmit.
+		 */
+		dev_kfree_skb_irq(skb);
+		fep->tx_skbuff[dirtyidx] = NULL;
+
+		/*
+		 * Update pointer to next buffer descriptor to be transmitted.
+		 */
+		if ((sc & BD_ENET_TX_WRAP) == 0)
+			bdp++;
+		else
+			bdp = fep->tx_bd_base;
+
+		/*
+		 * Since we have freed up a buffer, the ring is no longer
+		 * full.
+		 */
+		if (!fep->tx_free++)
+			do_wake = 1;
+	}
+
+	fep->dirty_tx = bdp;
+
+	if (do_restart)
+		(*fep->ops->tx_restart)(dev);
+
+	spin_unlock(&fep->lock);
+
+	if (do_wake)
+		netif_wake_queue(dev);
+}
+
+/*
+ * The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static irqreturn_t
+fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct fs_enet_private *fep;
+	const struct fs_platform_info *fpi;
+	u32 int_events;
+	u32 int_clr_events;
+	int nr, napi_ok;
+	int handled;
+
+	fep = netdev_priv(dev);
+	fpi = fep->fpi;
+
+	nr = 0;
+	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {
+
+		nr++;
+
+		int_clr_events = int_events;
+		if (fpi->use_napi)
+			int_clr_events &= ~fep->ev_napi_rx;
+
+		(*fep->ops->clear_int_events)(dev, int_clr_events);
+
+		if (int_events & fep->ev_err)
+			(*fep->ops->ev_error)(dev, int_events);
+
+		if (int_events & fep->ev_rx) {
+			if (!fpi->use_napi)
+				fs_enet_rx_non_napi(dev);
+			else {
+				napi_ok = netif_rx_schedule_prep(dev);
+
+				(*fep->ops->napi_disable_rx)(dev);
+				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+
+				/* NOTE: it is possible for FCCs in NAPI mode */
+				/* to submit a spurious interrupt while in poll */
+				if (napi_ok)
+					__netif_rx_schedule(dev);
+			}
+		}
+
+		if (int_events & fep->ev_tx)
+			fs_enet_tx(dev);
+	}
+
+	handled = nr > 0;
+	return IRQ_RETVAL(handled);
+}
+
+void fs_init_bds(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	cbd_t *bdp;
+	struct sk_buff *skb;
+	int i;
+
+	fs_cleanup_bds(dev);
+
+	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+	fep->tx_free = fep->tx_ring;
+	fep->cur_rx = fep->rx_bd_base;
+
+	/*
+	 * Initialize the receive buffer descriptors.
+	 */
+	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
+		skb = dev_alloc_skb(ENET_RX_FRSIZE);
+		if (skb == NULL) {
+			printk(KERN_WARNING DRV_MODULE_NAME
+			       ": %s Memory squeeze, unable to allocate skb\n",
+			       dev->name);
+			break;
+		}
+		fep->rx_skbuff[i] = skb;
+		skb->dev = dev;
+		CBDW_BUFADDR(bdp,
+			dma_map_single(fep->dev, skb->data,
+				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+				DMA_FROM_DEVICE));
+		CBDW_DATLEN(bdp, 0);	/* zero */
+		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
+			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
+	}
+	/*
+	 * if we failed, fill up the remainder
+	 */
+	for (; i < fep->rx_ring; i++, bdp++) {
+		fep->rx_skbuff[i] = NULL;
+		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
+	}
+
+	/*
+	 * ...and the same for transmit.
+	 */
+	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
+		fep->tx_skbuff[i] = NULL;
+		CBDW_BUFADDR(bdp, 0);
+		CBDW_DATLEN(bdp, 0);
+		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
+	}
+}
+
+void fs_cleanup_bds(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	struct sk_buff *skb;
+	int i;
+
+	/*
+	 * Reset SKB transmit buffers.
+	 */
+	for (i = 0; i < fep->tx_ring; i++) {
+		if ((skb = fep->tx_skbuff[i]) == NULL)
+			continue;
+
+		/* unmap */
+		dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
+
+		fep->tx_skbuff[i] = NULL;
+		dev_kfree_skb(skb);
+	}
+
+	/*
+	 * Reset SKB receive buffers
+	 */
+	for (i = 0; i < fep->rx_ring; i++) {
+		if ((skb = fep->rx_skbuff[i]) == NULL)
+			continue;
+
+		/* unmap */
+		dma_unmap_single(fep->dev, skb->data,
+			L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
+			DMA_FROM_DEVICE);
+
+		fep->rx_skbuff[i] = NULL;
+
+		dev_kfree_skb(skb);
+	}
+}
+
+/**********************************************************************************/
+
+static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	cbd_t *bdp;
+	int curidx;
+	u16 sc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&fep->tx_lock, flags);
+
+	/*
+	 * Fill in a Tx ring entry
+	 */
+	bdp = fep->cur_tx;
+
+	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&fep->tx_lock, flags);
+
+		/*
+		 * Ooops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since the tx queue should be stopped.
+		 */
+		printk(KERN_WARNING DRV_MODULE_NAME
+		       ": %s tx queue full!.\n", dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	curidx = bdp - fep->tx_bd_base;
+	/*
+	 * Clear all of the status flags.
+	 */
+	CBDC_SC(bdp, BD_ENET_TX_STATS);
+
+	/*
+	 * Save skb pointer.
+	 */
+	fep->tx_skbuff[curidx] = skb;
+
+	fep->stats.tx_bytes += skb->len;
+
+	/*
+	 * Push the data cache so the CPM does not get stale memory data.
+	 */
+	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
+			skb->data, skb->len, DMA_TO_DEVICE));
+	CBDW_DATLEN(bdp, skb->len);
+
+	dev->trans_start = jiffies;
+
+	/*
+	 * If this was the last BD in the ring, start at the beginning again.
+	 */
+	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+		fep->cur_tx++;
+	else
+		fep->cur_tx = fep->tx_bd_base;
+
+	if (!--fep->tx_free)
+		netif_stop_queue(dev);
+
+	/* Trigger transmission start */
+	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
+	     BD_ENET_TX_LAST | BD_ENET_TX_TC;
+
+	/* note that while FEC does not have this bit
+	 * it marks it as available for software use
+	 * yay for hw reuse :) */
+	if (skb->len <= 60)
+		sc |= BD_ENET_TX_PAD;
+	CBDS_SC(bdp, sc);
+
+	(*fep->ops->tx_kickstart)(dev);
+
+	spin_unlock_irqrestore(&fep->tx_lock, flags);
+
+	return NETDEV_TX_OK;
+}
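+
+/*
+ * On the BD_ENET_TX_PAD use above: 60 bytes is the minimum Ethernet
+ * payload (excluding the 4-byte FCS), so shorter frames set the pad bit
+ * and let the controller pad them out in hardware.
+ */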
+
+static int fs_request_irq(struct net_device *dev, int irq, const char *name,
+	irqreturn_t (*irqf)(int irq, void *dev_id, struct pt_regs *regs))
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+
+	(*fep->ops->pre_request_irq)(dev, irq);
+	return request_irq(irq, irqf, SA_SHIRQ, name, dev);
+}
+
+static void fs_free_irq(struct net_device *dev, int irq)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+
+	free_irq(irq, dev);
+	(*fep->ops->post_free_irq)(dev, irq);
+}
+
+/**********************************************************************************/
+
+/* This interrupt occurs when the PHY detects a link change. */
+static irqreturn_t
+fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct fs_enet_private *fep;
+	const struct fs_platform_info *fpi;
+
+	fep = netdev_priv(dev);
+	fpi = fep->fpi;
+
+	/*
+	 * Acknowledge the interrupt if possible. If we have not
+	 * found the PHY yet we can't process or acknowledge the
+	 * interrupt now. Instead we ignore this interrupt for now,
+	 * which we can do since it is edge triggered. It will be
+	 * acknowledged later by fs_enet_open().
+	 */
+	if (!fep->phy)
+		return IRQ_NONE;
+
+	fs_mii_ack_int(dev);
+	fs_mii_link_status_change_check(dev, 0);
+
+	return IRQ_HANDLED;
+}
+
+static void fs_timeout(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	unsigned long flags;
+	int wake = 0;
+
+	fep->stats.tx_errors++;
+
+	spin_lock_irqsave(&fep->lock, flags);
+
+	if (dev->flags & IFF_UP) {
+		(*fep->ops->stop)(dev);
+		(*fep->ops->restart)(dev);
+	}
+
+	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
+	spin_unlock_irqrestore(&fep->lock, flags);
+
+	if (wake)
+		netif_wake_queue(dev);
+}
+
+static int fs_enet_open(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	const struct fs_platform_info *fpi = fep->fpi;
+	int r;
+
+	/* Install our interrupt handler. */
+	r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
+	if (r != 0) {
+		printk(KERN_ERR DRV_MODULE_NAME
+		       ": %s Could not allocate FEC IRQ!", dev->name);
+		return -EINVAL;
+	}
+
+	/* Install our phy interrupt handler */
+	if (fpi->phy_irq != -1) {
+
+		r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
+		if (r != 0) {
+			printk(KERN_ERR DRV_MODULE_NAME
+			       ": %s Could not allocate PHY IRQ!", dev->name);
+			fs_free_irq(dev, fep->interrupt);
+			return -EINVAL;
+		}
+	}
+
+	fs_mii_startup(dev);
+	netif_carrier_off(dev);
+	fs_mii_link_status_change_check(dev, 1);
+
+	return 0;
+}
+
+static int fs_enet_close(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	const struct fs_platform_info *fpi = fep->fpi;
+	unsigned long flags;
+
+	netif_stop_queue(dev);
+	netif_carrier_off(dev);
+	fs_mii_shutdown(dev);
+
+	spin_lock_irqsave(&fep->lock, flags);
+	(*fep->ops->stop)(dev);
+	spin_unlock_irqrestore(&fep->lock, flags);
+
+	/* release any irqs */
+	if (fpi->phy_irq != -1)
+		fs_free_irq(dev, fpi->phy_irq);
+	fs_free_irq(dev, fep->interrupt);
+
+	return 0;
+}
+
+static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	return &fep->stats;
+}
+
+/*************************************************************************/
+
+static void fs_get_drvinfo(struct net_device *dev,
+			   struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, DRV_MODULE_NAME);
+	strcpy(info->version, DRV_MODULE_VERSION);
+}
+
+static int fs_get_regs_len(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+
+	return (*fep->ops->get_regs_len)(dev);
+}
+
+static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			void *p)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	unsigned long flags;
+	int r, len;
+
+	len = regs->len;
+
+	spin_lock_irqsave(&fep->lock, flags);
+	r = (*fep->ops->get_regs)(dev, p, &len);
+	spin_unlock_irqrestore(&fep->lock, flags);
+
+	if (r == 0)
+		regs->version = 0;
+}
+
+static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&fep->lock, flags);
+	rc = mii_ethtool_gset(&fep->mii_if, cmd);
+	spin_unlock_irqrestore(&fep->lock, flags);
+
+	return rc;
+}
+
+static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	unsigned long flags;
+	int rc;
+
+	spin_lock_irqsave(&fep->lock, flags);
+	rc = mii_ethtool_sset(&fep->mii_if, cmd);
+	spin_unlock_irqrestore(&fep->lock, flags);
+
+	return rc;
+}
+
+static int fs_nway_reset(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	return mii_nway_restart(&fep->mii_if);
+}
+
+static u32 fs_get_msglevel(struct net_device *dev)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	return fep->msg_enable;
+}
+
+static void fs_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	fep->msg_enable = value;
+}
+
+static struct ethtool_ops fs_ethtool_ops = {
+	.get_drvinfo	= fs_get_drvinfo,
+	.get_regs_len	= fs_get_regs_len,
+	.get_settings	= fs_get_settings,
+	.set_settings	= fs_set_settings,
+	.nway_reset	= fs_nway_reset,
+	.get_link	= ethtool_op_get_link,
+	.get_msglevel	= fs_get_msglevel,
+	.set_msglevel	= fs_set_msglevel,
+	.get_tx_csum	= ethtool_op_get_tx_csum,
+	.set_tx_csum	= ethtool_op_set_tx_csum,	/* local! */
+	.get_sg		= ethtool_op_get_sg,
+	.set_sg		= ethtool_op_set_sg,
+	.get_regs	= fs_get_regs,
+};
+
+static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct fs_enet_private *fep = netdev_priv(dev);
+	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
+	unsigned long flags;
+	int rc;
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	spin_lock_irqsave(&fep->lock, flags);
+	rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
+	spin_unlock_irqrestore(&fep->lock, flags);
+	return rc;
+}
+
+extern int fs_mii_connect(struct net_device *dev);
+extern void fs_mii_disconnect(struct net_device *dev);
+
+static struct net_device *fs_init_instance(struct device *dev,
+	const struct fs_platform_info *fpi)
+{
+	struct net_device *ndev = NULL;
+	struct fs_enet_private *fep = NULL;
+	int privsize, i, r, err = 0, registered = 0;
+
+	/* guard */
+	if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
+		return ERR_PTR(-EINVAL);
+
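+	/*
+	 * A single allocation holds the private area plus the two arrays
+	 * of sk_buff pointers that live right behind it; rx_skbuff and
+	 * tx_skbuff are pointed into that tail once setup_data() succeeds.
+	 */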
+	privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
+		(fpi->rx_ring + fpi->tx_ring));
+
+	ndev = alloc_etherdev(privsize);
+	if (!ndev) {
+		err = -ENOMEM;
+		goto err;
+	}
+	SET_MODULE_OWNER(ndev);
+
+	fep = netdev_priv(ndev);
+	memset(fep, 0, privsize);	/* clear everything */
+
+	fep->dev = dev;
+	dev_set_drvdata(dev, ndev);
+	fep->fpi = fpi;
+	if (fpi->init_ioports)
+		fpi->init_ioports();
+
+#ifdef CONFIG_FS_ENET_HAS_FEC
+	if (fs_get_fec_index(fpi->fs_no) >= 0)
+		fep->ops = &fs_fec_ops;
+#endif
+
+#ifdef CONFIG_FS_ENET_HAS_SCC
+	if (fs_get_scc_index(fpi->fs_no) >= 0)
+		fep->ops = &fs_scc_ops;
+#endif
+
+#ifdef CONFIG_FS_ENET_HAS_FCC
+	if (fs_get_fcc_index(fpi->fs_no) >= 0)
+		fep->ops = &fs_fcc_ops;
+#endif
+
+	if (fep->ops == NULL) {
+		printk(KERN_ERR DRV_MODULE_NAME
+		       ": %s No matching ops found (%d).\n",
+		       ndev->name, fpi->fs_no);
+		err = -EINVAL;
+		goto err;
+	}
+
+	r = (*fep->ops->setup_data)(ndev);
+	if (r != 0) {
+		printk(KERN_ERR DRV_MODULE_NAME
+		       ": %s setup_data failed\n",
+		       ndev->name);
+		err = r;
+		goto err;
+	}
+
+	/* point rx_skbuff, tx_skbuff */
+	fep->rx_skbuff = (struct sk_buff **)&fep[1];
+	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+
+	/* init locks */
+	spin_lock_init(&fep->lock);
+	spin_lock_init(&fep->tx_lock);
+
+	/*
+	 * Set the Ethernet address.
+	 */
+	for (i = 0; i < 6; i++)
+		ndev->dev_addr[i] = fpi->macaddr[i];
+
+	r = (*fep->ops->allocate_bd)(ndev);
+
+	if (fep->ring_base == NULL) {
+		printk(KERN_ERR DRV_MODULE_NAME
+		       ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
+		err = r;
+		goto err;
+	}
+
+	/*
+	 * Set receive and transmit descriptor base.
+	 */
+	fep->rx_bd_base = fep->ring_base;
+	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
+
+	/* initialize ring size variables */
+	fep->tx_ring = fpi->tx_ring;
+	fep->rx_ring = fpi->rx_ring;
+
+	/*
+	 * The FEC Ethernet specific entries in the device structure.
+	 */
+	ndev->open = fs_enet_open;
+	ndev->hard_start_xmit = fs_enet_start_xmit;
+	ndev->tx_timeout = fs_timeout;
+	ndev->watchdog_timeo = 2 * HZ;
+	ndev->stop = fs_enet_close;
+	ndev->get_stats = fs_enet_get_stats;
+	ndev->set_multicast_list = fs_set_multicast_list;
+	if (fpi->use_napi) {
+		ndev->poll = fs_enet_rx_napi;
+		ndev->weight = fpi->napi_weight;
+	}
+	ndev->ethtool_ops = &fs_ethtool_ops;
+	ndev->do_ioctl = fs_ioctl;
+
+	init_timer(&fep->phy_timer_list);
+
+	netif_carrier_off(ndev);
+
+	err = register_netdev(ndev);
+	if (err != 0) {
+		printk(KERN_ERR DRV_MODULE_NAME
+		       ": %s register_netdev failed.\n", ndev->name);
+		goto err;
+	}
+	registered = 1;
+
+	err = fs_mii_connect(ndev);
+	if (err != 0) {
+		printk(KERN_ERR DRV_MODULE_NAME
+		       ": %s fs_mii_connect failed.\n", ndev->name);
+		goto err;
+	}
+
+	return ndev;
+
+err:
+	if (ndev != NULL) {
+
+		if (registered)
+			unregister_netdev(ndev);
+
+		if (fep != NULL) {
+			(*fep->ops->free_bd)(ndev);
+			(*fep->ops->cleanup_data)(ndev);
+		}
+
+		free_netdev(ndev);
+	}
+
+	dev_set_drvdata(dev, NULL);
+
+	return ERR_PTR(err);
+}
+
+static int fs_cleanup_instance(struct net_device *ndev)
+{
+	struct fs_enet_private *fep;
+	const struct fs_platform_info *fpi;
+	struct device *dev;
+
+	if (ndev == NULL)
+		return -EINVAL;
+
+	fep = netdev_priv(ndev);
+	if (fep == NULL)
+		return -EINVAL;
+
+	fpi = fep->fpi;
+
+	fs_mii_disconnect(ndev);
+
+	unregister_netdev(ndev);
+
+	dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
+			  fep->ring_base, fep->ring_mem_addr);
+
+	/* reset it */
+	(*fep->ops->cleanup_data)(ndev);
+
+	dev = fep->dev;
+	if (dev != NULL) {
+		dev_set_drvdata(dev, NULL);
+		fep->dev = NULL;
+	}
+
+	free_netdev(ndev);
+
+	return 0;
+}
+
+/**************************************************************************************/
+
+/* handy pointer to the immap */
+void *fs_enet_immap = NULL;
+
+static int setup_immap(void)
+{
+	phys_addr_t paddr = 0;
+	unsigned long size = 0;
+
+#ifdef CONFIG_CPM1
+	paddr = IMAP_ADDR;
+	size = 0x10000;	/* map 64K */
+#endif
+
+#ifdef CONFIG_CPM2
+	paddr = CPM_MAP_ADDR;
+	size = 0x40000;	/* map 256 K */
+#endif
+	fs_enet_immap = ioremap(paddr, size);
+	if (fs_enet_immap == NULL)
+		return -EBADF;	/* XXX ahem; maybe just BUG_ON? */
+
+	return 0;
+}
+
+static void cleanup_immap(void)
+{
+	if (fs_enet_immap != NULL) {
+		iounmap(fs_enet_immap);
+		fs_enet_immap = NULL;
+	}
+}
+
+/**************************************************************************************/
+
+static int __devinit fs_enet_probe(struct device *dev)
+{
+	struct net_device *ndev;
+
+	/* no fixup - no device */
+	if (dev->platform_data == NULL) {
+		printk(KERN_INFO "fs_enet: "
+		       "probe called with no platform data; "
+		       "remove unused devices\n");
+		return -ENODEV;
+	}
+
+	ndev = fs_init_instance(dev, dev->platform_data);
+	if (IS_ERR(ndev))
+		return PTR_ERR(ndev);
+	return 0;
+}
+
+static int fs_enet_remove(struct device *dev)
+{
+	return fs_cleanup_instance(dev_get_drvdata(dev));
+}
+
+static struct device_driver fs_enet_fec_driver = {
+	.name		= "fsl-cpm-fec",
+	.bus		= &platform_bus_type,
+	.probe		= fs_enet_probe,
+	.remove		= fs_enet_remove,
+#ifdef CONFIG_PM
+/*	.suspend	= fs_enet_suspend,	TODO */
+/*	.resume		= fs_enet_resume,	TODO */
+#endif
+};
+
+static struct device_driver fs_enet_scc_driver = {
+	.name		= "fsl-cpm-scc",
+	.bus		= &platform_bus_type,
+	.probe		= fs_enet_probe,
+	.remove		= fs_enet_remove,
+#ifdef CONFIG_PM
+/*	.suspend	= fs_enet_suspend,	TODO */
+/*	.resume		= fs_enet_resume,	TODO */
+#endif
+};
+
+static struct device_driver fs_enet_fcc_driver = {
+	.name		= "fsl-cpm-fcc",
+	.bus		= &platform_bus_type,
+	.probe		= fs_enet_probe,
+	.remove		= fs_enet_remove,
+#ifdef CONFIG_PM
+/*	.suspend	= fs_enet_suspend,	TODO */
+/*	.resume		= fs_enet_resume,	TODO */
+#endif
+};
+
+static int __init fs_init(void)
+{
+	int r;
+
+	printk(KERN_INFO
+	       "%s", version);
+
+	r = setup_immap();
+	if (r != 0)
+		return r;
+	r = driver_register(&fs_enet_fec_driver);
+	if (r != 0)
+		goto err;
+
+	r = driver_register(&fs_enet_fcc_driver);
+	if (r != 0)
+		goto err;
+
+	r = driver_register(&fs_enet_scc_driver);
+	if (r != 0)
+		goto err;
+
+	return 0;
+err:
+	cleanup_immap();
+	return r;
+
+}
+
+static void __exit fs_cleanup(void)
+{
+	driver_unregister(&fs_enet_fec_driver);
+	driver_unregister(&fs_enet_fcc_driver);
+	driver_unregister(&fs_enet_scc_driver);
+	cleanup_immap();
+}
+
+/**************************************************************************************/
+
+module_init(fs_init);
+module_exit(fs_cleanup);
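
Note: the three device_driver names registered above ("fsl-cpm-fec", "fsl-cpm-scc", "fsl-cpm-fcc") are matched by platform devices whose platform_data carries the fs_platform_info consumed in fs_enet_probe(). A hypothetical board-file sketch — the field values are placeholders, not from this patch:

	static struct fs_platform_info mpc8xx_enet_pdata = {
		/* .fs_no, .rx_ring, .tx_ring, .macaddr, .use_napi, ...
		 * filled in by the board's setup code */
	};

	static struct platform_device mpc8xx_enet_dev = {
		.name	= "fsl-cpm-fec",	/* matches fs_enet_fec_driver */
		.id	= 0,
		.dev	= {
			.platform_data = &mpc8xx_enet_pdata,
		},
	};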
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
new file mode 100644
index 000000000000..c6770377ef87
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-mii.c
@@ -0,0 +1,507 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/string.h>
25#include <linux/ptrace.h>
26#include <linux/errno.h>
27#include <linux/ioport.h>
28#include <linux/slab.h>
29#include <linux/interrupt.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/spinlock.h>
37#include <linux/mii.h>
38#include <linux/ethtool.h>
39#include <linux/bitops.h>
40
41#include <asm/pgtable.h>
42#include <asm/irq.h>
43#include <asm/uaccess.h>
44
45#include "fs_enet.h"
46
47/*************************************************/
48
49/*
50 * Generic PHY support.
51 * Should work for all PHYs, but link change is detected by polling
52 */
53
54static void generic_timer_callback(unsigned long data)
55{
56 struct net_device *dev = (struct net_device *)data;
57 struct fs_enet_private *fep = netdev_priv(dev);
58
59 fep->phy_timer_list.expires = jiffies + HZ / 2;
60
61 add_timer(&fep->phy_timer_list);
62
63 fs_mii_link_status_change_check(dev, 0);
64}
65
66static void generic_startup(struct net_device *dev)
67{
68 struct fs_enet_private *fep = netdev_priv(dev);
69
70 fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
71 fep->phy_timer_list.data = (unsigned long)dev;
72 fep->phy_timer_list.function = generic_timer_callback;
73 add_timer(&fep->phy_timer_list);
74}
75
76static void generic_shutdown(struct net_device *dev)
77{
78 struct fs_enet_private *fep = netdev_priv(dev);
79
80 del_timer_sync(&fep->phy_timer_list);
81}
82
83/* ------------------------------------------------------------------------- */
84/* The Davicom DM9161 is used on the NETTA board */
85
86/* register definitions */
87
88#define MII_DM9161_ANAR 4 /* Aux. Config Register */
89#define MII_DM9161_ACR 16 /* Aux. Config Register */
90#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
91#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
92#define MII_DM9161_INTR 21 /* Interrupt Register */
93#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
94#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
95
96static void dm9161_startup(struct net_device *dev)
97{
98 struct fs_enet_private *fep = netdev_priv(dev);
99
100 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
101 /* Start autonegotiation */
102 fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
103
104 set_current_state(TASK_UNINTERRUPTIBLE);
105 schedule_timeout(HZ*8);
106}
107
108static void dm9161_ack_int(struct net_device *dev)
109{
110 struct fs_enet_private *fep = netdev_priv(dev);
111
112 fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
113}
114
115static void dm9161_shutdown(struct net_device *dev)
116{
117 struct fs_enet_private *fep = netdev_priv(dev);
118
119 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
120}
121
122/**********************************************************************************/
123
124static const struct phy_info phy_info[] = {
125 {
126 .id = 0x00181b88,
127 .name = "DM9161",
128 .startup = dm9161_startup,
129 .ack_int = dm9161_ack_int,
130 .shutdown = dm9161_shutdown,
131 }, {
132 .id = 0,
133 .name = "GENERIC",
134 .startup = generic_startup,
135 .shutdown = generic_shutdown,
136 },
137};
138
139/**********************************************************************************/
140
141static int phy_id_detect(struct net_device *dev)
142{
143 struct fs_enet_private *fep = netdev_priv(dev);
144 const struct fs_platform_info *fpi = fep->fpi;
145 struct fs_enet_mii_bus *bus = fep->mii_bus;
146 int i, r, start, end, phytype, physubtype;
147 const struct phy_info *phy;
148 int phy_hwid, phy_id;
149
150 phy_hwid = -1;
151 fep->phy = NULL;
152
153 /* auto-detect? */
154 if (fpi->phy_addr == -1) {
155 start = 1;
156 end = 32;
157 } else { /* direct */
158 start = fpi->phy_addr;
159 end = start + 1;
160 }
161
162 for (phy_id = start; phy_id < end; phy_id++) {
163 /* skip already used phy addresses on this bus */
164 if (bus->usage_map & (1 << phy_id))
165 continue;
166 r = fs_mii_read(dev, phy_id, MII_PHYSID1);
167 if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
168 continue;
169 r = fs_mii_read(dev, phy_id, MII_PHYSID2);
170 if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
171 continue;
172 phy_hwid = (phytype << 16) | physubtype;
173 if (phy_hwid != -1)
174 break;
175 }
176
177 if (phy_hwid == -1) {
178		printk(KERN_ERR DRV_MODULE_NAME
179		       ": %s No PHY detected! range=0x%02x-0x%02x\n",
180		       dev->name, start, end - 1);
181 return -1;
182 }
183
184 for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
185 if (phy->id == (phy_hwid >> 4) || phy->id == 0)
186 break;
187
188 if (i >= ARRAY_SIZE(phy_info)) {
189 printk(KERN_ERR DRV_MODULE_NAME
190 ": %s PHY id 0x%08x is not supported!\n",
191 dev->name, phy_hwid);
192 return -1;
193 }
194
195 fep->phy = phy;
196
197 /* mark this address as used */
198 bus->usage_map |= (1 << phy_id);
199
200 printk(KERN_INFO DRV_MODULE_NAME
201 ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
202 dev->name, phy_id, fep->phy->name, phy_hwid,
203 fpi->phy_addr == -1 ? " (auto-detected)" : "");
204
205 return phy_id;
206}
207
208void fs_mii_startup(struct net_device *dev)
209{
210 struct fs_enet_private *fep = netdev_priv(dev);
211
212 if (fep->phy->startup)
213 (*fep->phy->startup) (dev);
214}
215
216void fs_mii_shutdown(struct net_device *dev)
217{
218 struct fs_enet_private *fep = netdev_priv(dev);
219
220 if (fep->phy->shutdown)
221 (*fep->phy->shutdown) (dev);
222}
223
224void fs_mii_ack_int(struct net_device *dev)
225{
226 struct fs_enet_private *fep = netdev_priv(dev);
227
228 if (fep->phy->ack_int)
229 (*fep->phy->ack_int) (dev);
230}
231
232#define MII_LINK 0x0001
233#define MII_HALF 0x0002
234#define MII_FULL 0x0004
235#define MII_BASE4 0x0008
236#define MII_10M 0x0010
237#define MII_100M 0x0020
238#define MII_1G 0x0040
239#define MII_10G 0x0080
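/* e.g. a link that autonegotiated 100 Mbps full duplex reads back as
   (MII_LINK | MII_FULL | MII_100M) */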
240
241/* return the full MII status in one gulp, in a usable form */
242static unsigned int mii_full_status(struct mii_if_info *mii)
243{
244 unsigned int status;
245 int bmsr, adv, lpa, neg;
246 struct fs_enet_private* fep = netdev_priv(mii->dev);
247
248 /* first, a dummy read, needed to latch some MII phys */
249 (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
250 bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
251
252 /* no link */
253 if ((bmsr & BMSR_LSTATUS) == 0)
254 return 0;
255
256 status = MII_LINK;
257
258	/* Let's look at what ANEG says if it's supported - otherwise we
259	   take the right values from the platform info */
260	if (!mii->force_media) {
261 /* autoneg not completed; don't bother */
262 if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
263 return 0;
264
265 adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
266 lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
267
268 neg = lpa & adv;
269 } else {
270 neg = fep->fpi->bus_info->lpa;
271 }
272
273 if (neg & LPA_100FULL)
274 status |= MII_FULL | MII_100M;
275 else if (neg & LPA_100BASE4)
276		status |= MII_HALF | MII_BASE4 | MII_100M; /* 100BASE-T4 is half duplex only */
277 else if (neg & LPA_100HALF)
278 status |= MII_HALF | MII_100M;
279 else if (neg & LPA_10FULL)
280 status |= MII_FULL | MII_10M;
281 else
282 status |= MII_HALF | MII_10M;
283
284 return status;
285}
286
287void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
288{
289 struct fs_enet_private *fep = netdev_priv(dev);
290 struct mii_if_info *mii = &fep->mii_if;
291 unsigned int mii_status;
292 int ok_to_print, link, duplex, speed;
293 unsigned long flags;
294
295 ok_to_print = netif_msg_link(fep);
296
297 mii_status = mii_full_status(mii);
298
299 if (!init_media && mii_status == fep->last_mii_status)
300 return;
301
302 fep->last_mii_status = mii_status;
303
304 link = !!(mii_status & MII_LINK);
305 duplex = !!(mii_status & MII_FULL);
306 speed = (mii_status & MII_100M) ? 100 : 10;
307
308 if (link == 0) {
309 netif_carrier_off(mii->dev);
310 netif_stop_queue(dev);
311 if (!init_media) {
312 spin_lock_irqsave(&fep->lock, flags);
313 (*fep->ops->stop)(dev);
314 spin_unlock_irqrestore(&fep->lock, flags);
315 }
316
317 if (ok_to_print)
318 printk(KERN_INFO "%s: link down\n", mii->dev->name);
319
320 } else {
321
322 mii->full_duplex = duplex;
323
324 netif_carrier_on(mii->dev);
325
326 spin_lock_irqsave(&fep->lock, flags);
327 fep->duplex = duplex;
328 fep->speed = speed;
329 (*fep->ops->restart)(dev);
330 spin_unlock_irqrestore(&fep->lock, flags);
331
332 netif_start_queue(dev);
333
334 if (ok_to_print)
335 printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
336 dev->name, speed, duplex ? "full" : "half");
337 }
338}
339
340/**********************************************************************************/
341
342int fs_mii_read(struct net_device *dev, int phy_id, int location)
343{
344 struct fs_enet_private *fep = netdev_priv(dev);
345 struct fs_enet_mii_bus *bus = fep->mii_bus;
346
347 unsigned long flags;
348 int ret;
349
350 spin_lock_irqsave(&bus->mii_lock, flags);
351 ret = (*bus->mii_read)(bus, phy_id, location);
352 spin_unlock_irqrestore(&bus->mii_lock, flags);
353
354 return ret;
355}
356
357void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
358{
359 struct fs_enet_private *fep = netdev_priv(dev);
360 struct fs_enet_mii_bus *bus = fep->mii_bus;
361 unsigned long flags;
362
363 spin_lock_irqsave(&bus->mii_lock, flags);
364 (*bus->mii_write)(bus, phy_id, location, value);
365 spin_unlock_irqrestore(&bus->mii_lock, flags);
366}
367
368/*****************************************************************************/
369
370/* list of all registered mii buses */
371static LIST_HEAD(fs_mii_bus_list);
372
373static struct fs_enet_mii_bus *lookup_bus(int method, int id)
374{
375 struct list_head *ptr;
376 struct fs_enet_mii_bus *bus;
377
378 list_for_each(ptr, &fs_mii_bus_list) {
379 bus = list_entry(ptr, struct fs_enet_mii_bus, list);
380 if (bus->bus_info->method == method &&
381 bus->bus_info->id == id)
382 return bus;
383 }
384 return NULL;
385}
386
387static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
388{
389 struct fs_enet_mii_bus *bus;
390 int ret = 0;
391
392 bus = kmalloc(sizeof(*bus), GFP_KERNEL);
393 if (bus == NULL) {
394 ret = -ENOMEM;
395 goto err;
396 }
397 memset(bus, 0, sizeof(*bus));
398 spin_lock_init(&bus->mii_lock);
399 bus->bus_info = bi;
400 bus->refs = 0;
401 bus->usage_map = 0;
402
403 /* perform initialization */
404 switch (bi->method) {
405
406 case fsmii_fixed:
407 ret = fs_mii_fixed_init(bus);
408 if (ret != 0)
409 goto err;
410 break;
411
412 case fsmii_bitbang:
413 ret = fs_mii_bitbang_init(bus);
414 if (ret != 0)
415 goto err;
416 break;
417#ifdef CONFIG_FS_ENET_HAS_FEC
418 case fsmii_fec:
419 ret = fs_mii_fec_init(bus);
420 if (ret != 0)
421 goto err;
422 break;
423#endif
424 default:
425 ret = -EINVAL;
426 goto err;
427 }
428
429 list_add(&bus->list, &fs_mii_bus_list);
430
431 return bus;
432
433err:
434 if (bus)
435 kfree(bus);
436 return ERR_PTR(ret);
437}
438
439static void destroy_bus(struct fs_enet_mii_bus *bus)
440{
441 /* remove from bus list */
442 list_del(&bus->list);
443
444 /* nothing more needed */
445 kfree(bus);
446}
447
448int fs_mii_connect(struct net_device *dev)
449{
450 struct fs_enet_private *fep = netdev_priv(dev);
451 const struct fs_platform_info *fpi = fep->fpi;
452 struct fs_enet_mii_bus *bus = NULL;
453
454 /* check method validity */
455 switch (fpi->bus_info->method) {
456 case fsmii_fixed:
457 case fsmii_bitbang:
458 break;
459#ifdef CONFIG_FS_ENET_HAS_FEC
460 case fsmii_fec:
461 break;
462#endif
463 default:
464 printk(KERN_ERR DRV_MODULE_NAME
465 ": %s Unknown MII bus method (%d)!\n",
466 dev->name, fpi->bus_info->method);
467 return -EINVAL;
468 }
469
470 bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
471
472 /* if not found create new bus */
473 if (bus == NULL) {
474 bus = create_bus(fpi->bus_info);
475 if (IS_ERR(bus)) {
476 printk(KERN_ERR DRV_MODULE_NAME
477 ": %s MII bus creation failure!\n", dev->name);
478 return PTR_ERR(bus);
479 }
480 }
481
482 bus->refs++;
483
484 fep->mii_bus = bus;
485
486 fep->mii_if.dev = dev;
487 fep->mii_if.phy_id_mask = 0x1f;
488 fep->mii_if.reg_num_mask = 0x1f;
489 fep->mii_if.mdio_read = fs_mii_read;
490 fep->mii_if.mdio_write = fs_mii_write;
491 fep->mii_if.force_media = fpi->bus_info->disable_aneg;
492 fep->mii_if.phy_id = phy_id_detect(dev);
493
494 return 0;
495}
496
497void fs_mii_disconnect(struct net_device *dev)
498{
499 struct fs_enet_private *fep = netdev_priv(dev);
500 struct fs_enet_mii_bus *bus = NULL;
501
502 bus = fep->mii_bus;
503 fep->mii_bus = NULL;
504
505 if (--bus->refs <= 0)
506 destroy_bus(bus);
507}
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
new file mode 100644
index 000000000000..1105543b9d88
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -0,0 +1,245 @@
1#ifndef FS_ENET_H
2#define FS_ENET_H
3
4#include <linux/mii.h>
5#include <linux/netdevice.h>
6#include <linux/types.h>
7#include <linux/version.h>
8#include <linux/list.h>
9
10#include <linux/fs_enet_pd.h>
11
12#include <asm/dma-mapping.h>
13
14#ifdef CONFIG_CPM1
15#include <asm/commproc.h>
16#endif
17
18#ifdef CONFIG_CPM2
19#include <asm/cpm2.h>
20#endif
21
22/* hw driver ops */
23struct fs_ops {
24 int (*setup_data)(struct net_device *dev);
25 int (*allocate_bd)(struct net_device *dev);
26 void (*free_bd)(struct net_device *dev);
27 void (*cleanup_data)(struct net_device *dev);
28 void (*set_multicast_list)(struct net_device *dev);
29 void (*restart)(struct net_device *dev);
30 void (*stop)(struct net_device *dev);
31 void (*pre_request_irq)(struct net_device *dev, int irq);
32 void (*post_free_irq)(struct net_device *dev, int irq);
33 void (*napi_clear_rx_event)(struct net_device *dev);
34 void (*napi_enable_rx)(struct net_device *dev);
35 void (*napi_disable_rx)(struct net_device *dev);
36 void (*rx_bd_done)(struct net_device *dev);
37 void (*tx_kickstart)(struct net_device *dev);
38 u32 (*get_int_events)(struct net_device *dev);
39 void (*clear_int_events)(struct net_device *dev, u32 int_events);
40 void (*ev_error)(struct net_device *dev, u32 int_events);
41 int (*get_regs)(struct net_device *dev, void *p, int *sizep);
42 int (*get_regs_len)(struct net_device *dev);
43 void (*tx_restart)(struct net_device *dev);
44};
45
46struct phy_info {
47 unsigned int id;
48 const char *name;
49 void (*startup) (struct net_device * dev);
50 void (*shutdown) (struct net_device * dev);
51 void (*ack_int) (struct net_device * dev);
52};
53
54/* The FEC stores dest/src/type, data, and checksum for receive packets.
55 */
56#define MAX_MTU 1508		/* Allow full-sized PPPoE packets over VLAN */
57#define MIN_MTU 46 /* this is data size */
58#define CRC_LEN 4
59
60#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
61#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
62
63/* Must be a multiple of 32 (to cover both FEC & FCC) */
64#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
65/* This is needed so that invalidate_xxx won't invalidate too much */
66#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE)
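/* with the defaults above: 1508 (MAX_MTU) + 14 (ETH_HLEN) + 4 (CRC) = 1526,
   which PKT_MAXBLR_SIZE rounds up to 1536 */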
67
68struct fs_enet_mii_bus {
69 struct list_head list;
70 spinlock_t mii_lock;
71 const struct fs_mii_bus_info *bus_info;
72 int refs;
73 u32 usage_map;
74
75 int (*mii_read)(struct fs_enet_mii_bus *bus,
76 int phy_id, int location);
77
78 void (*mii_write)(struct fs_enet_mii_bus *bus,
79 int phy_id, int location, int value);
80
81 union {
82 struct {
83 unsigned int mii_speed;
84 void *fecp;
85 } fec;
86
87 struct {
88 /* note that the actual port size may */
89 /* be different; cpm(s) handle it OK */
90 u8 mdio_msk;
91 u8 *mdio_dir;
92 u8 *mdio_dat;
93 u8 mdc_msk;
94 u8 *mdc_dir;
95 u8 *mdc_dat;
96 } bitbang;
97
98 struct {
99 u16 lpa;
100 } fixed;
101 };
102};
103
104int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
105int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
106int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
107
108struct fs_enet_private {
109 struct device *dev; /* pointer back to the device (must be initialized first) */
110 spinlock_t lock; /* during all ops except TX pckt processing */
111 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
112 const struct fs_platform_info *fpi;
113 const struct fs_ops *ops;
114 int rx_ring, tx_ring;
115 dma_addr_t ring_mem_addr;
116 void *ring_base;
117 struct sk_buff **rx_skbuff;
118 struct sk_buff **tx_skbuff;
119 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
120 cbd_t *tx_bd_base;
121 cbd_t *dirty_tx; /* ring entries to be free()ed. */
122 cbd_t *cur_rx;
123 cbd_t *cur_tx;
124 int tx_free;
125 struct net_device_stats stats;
126 struct timer_list phy_timer_list;
127 const struct phy_info *phy;
128 u32 msg_enable;
129 struct mii_if_info mii_if;
130 unsigned int last_mii_status;
131 struct fs_enet_mii_bus *mii_bus;
132 int interrupt;
133
134 int duplex, speed; /* current settings */
135
136 /* event masks */
137 u32 ev_napi_rx; /* mask of NAPI rx events */
138 u32 ev_rx; /* rx event mask */
139 u32 ev_tx; /* tx event mask */
140 u32 ev_err; /* error event mask */
141
142 u16 bd_rx_empty; /* mask of BD rx empty */
143 u16 bd_rx_err; /* mask of BD rx errors */
144
145 union {
146 struct {
147 int idx; /* FEC1 = 0, FEC2 = 1 */
148 void *fecp; /* hw registers */
149 u32 hthi, htlo; /* state for multicast */
150 } fec;
151
152 struct {
153 int idx; /* FCC1-3 = 0-2 */
154 void *fccp; /* hw registers */
155 void *ep; /* parameter ram */
156 void *fcccp; /* hw registers cont. */
157 void *mem; /* FCC DPRAM */
158 u32 gaddrh, gaddrl; /* group address */
159 } fcc;
160
161 struct {
162		int idx;		/* SCC1 = 0, SCC2 = 1, ... */
163 void *sccp; /* hw registers */
164 void *ep; /* parameter ram */
165 u32 hthi, htlo; /* state for multicast */
166 } scc;
167
168 };
169};
170
171/***************************************************************************/
172
173int fs_mii_read(struct net_device *dev, int phy_id, int location);
174void fs_mii_write(struct net_device *dev, int phy_id, int location, int value);
175
176void fs_mii_startup(struct net_device *dev);
177void fs_mii_shutdown(struct net_device *dev);
178void fs_mii_ack_int(struct net_device *dev);
179
180void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
181
182void fs_init_bds(struct net_device *dev);
183void fs_cleanup_bds(struct net_device *dev);
184
185/***************************************************************************/
186
187#define DRV_MODULE_NAME "fs_enet"
188#define PFX DRV_MODULE_NAME ": "
189#define DRV_MODULE_VERSION "1.0"
190#define DRV_MODULE_RELDATE "Aug 8, 2005"
191
192/***************************************************************************/
193
194int fs_enet_platform_init(void);
195void fs_enet_platform_cleanup(void);
196
197/***************************************************************************/
198
199/* buffer descriptor access macros */
200
201/* raw accessors, CPM-variant dependent */
202#if defined(CONFIG_CPM1)
203/* for a CPM1 the __raw_xxx accessors are sufficient */
204#define __cbd_out32(addr, x) __raw_writel(x, addr)
205#define __cbd_out16(addr, x) __raw_writew(x, addr)
206#define __cbd_in32(addr) __raw_readl(addr)
207#define __cbd_in16(addr) __raw_readw(addr)
208#else
209/* for others play it safe */
210#define __cbd_out32(addr, x) out_be32(addr, x)
211#define __cbd_out16(addr, x) out_be16(addr, x)
212#define __cbd_in32(addr) in_be32(addr)
213#define __cbd_in16(addr) in_be16(addr)
214#endif
215
216/* write */
217#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
218#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
219#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
220
221/* read */
222#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
223#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
224#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
225
226/* set bits */
227#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
228
229/* clear bits */
230#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
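/* illustrative use (flag name assumed from the cbd_t definitions):
   CBDS_SC(bdp, BD_ENET_RX_EMPTY) hands a buffer descriptor back to the
   CPM with a read-modify-write of cbd_sc; callers serialize externally */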
231
232/*******************************************************************/
233
234extern const struct fs_ops fs_fec_ops;
235extern const struct fs_ops fs_fcc_ops;
236extern const struct fs_ops fs_scc_ops;
237
238/*******************************************************************/
239
240/* handy pointer to the immap */
241extern void *fs_enet_immap;
242
243/*******************************************************************/
244
245#endif
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
new file mode 100644
index 000000000000..a940b96433c7
--- /dev/null
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -0,0 +1,578 @@
1/*
2 * FCC driver for Motorola MPC82xx (PQ2).
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/immap_cpm2.h>
39#include <asm/mpc8260.h>
40#include <asm/cpm2.h>
41
42#include <asm/pgtable.h>
43#include <asm/irq.h>
44#include <asm/uaccess.h>
45
46#include "fs_enet.h"
47
48/*************************************************/
49
50/* FCC access macros */
51
52#define __fcc_out32(addr, x) out_be32((unsigned *)addr, x)
53#define __fcc_out16(addr, x) out_be16((unsigned short *)addr, x)
54#define __fcc_out8(addr, x) out_8((unsigned char *)addr, x)
55#define __fcc_in32(addr) in_be32((unsigned *)addr)
56#define __fcc_in16(addr) in_be16((unsigned short *)addr)
57#define __fcc_in8(addr) in_8((unsigned char *)addr)
58
59/* parameter space */
60
61/* write, read, set bits, clear bits */
62#define W32(_p, _m, _v) __fcc_out32(&(_p)->_m, (_v))
63#define R32(_p, _m) __fcc_in32(&(_p)->_m)
64#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
65#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
66
67#define W16(_p, _m, _v) __fcc_out16(&(_p)->_m, (_v))
68#define R16(_p, _m) __fcc_in16(&(_p)->_m)
69#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
70#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
71
72#define W8(_p, _m, _v) __fcc_out8(&(_p)->_m, (_v))
73#define R8(_p, _m) __fcc_in8(&(_p)->_m)
74#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
75#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
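/* e.g. S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT) read-modify-writes
   GFMR to enable RX and TX, as restart() below does */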
76
77/*************************************************/
78
79#define FCC_MAX_MULTICAST_ADDRS 64
80
81#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
82#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
83#define mk_mii_end 0
84
85#define MAX_CR_CMD_LOOPS 10000
86
87static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 mcn, u32 op)
88{
89 const struct fs_platform_info *fpi = fep->fpi;
90
91 cpm2_map_t *immap = fs_enet_immap;
92 cpm_cpm2_t *cpmp = &immap->im_cpm;
93 u32 v;
94 int i;
95
96	/* A generic CPM command helper (e.g. a do_cpm_cmd() taking page &
97	   sblock) may appear eventually; until then issue the command directly */
98 v = mk_cr_cmd(fpi->cp_page, fpi->cp_block, mcn, op);
99 W32(cpmp, cp_cpcr, v | CPM_CR_FLG);
100 for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
101 if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
102 break;
103
104 if (i >= MAX_CR_CMD_LOOPS) {
105		printk(KERN_ERR "%s(): Unable to issue CPM command\n",
106 __FUNCTION__);
107 return 1;
108 }
109
110 return 0;
111}
112
113static int do_pd_setup(struct fs_enet_private *fep)
114{
115 struct platform_device *pdev = to_platform_device(fep->dev);
116 struct resource *r;
117
118 /* Fill out IRQ field */
119 fep->interrupt = platform_get_irq(pdev, 0);
120
121 /* Attach the memory for the FCC Parameter RAM */
122	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
123	if (r == NULL)
124		return -EINVAL;
125	fep->fcc.ep = (void *)r->start;
126
127	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
128	if (r == NULL)
129		return -EINVAL;
130	fep->fcc.fccp = (void *)r->start;
131
134 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
135
136 if (fep->fcc.fcccp == NULL)
137 return -EINVAL;
138
139 return 0;
140}
141
142#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
143#define FCC_RX_EVENT (FCC_ENET_RXF)
144#define FCC_TX_EVENT (FCC_ENET_TXB)
145#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY)
146
147static int setup_data(struct net_device *dev)
148{
149 struct fs_enet_private *fep = netdev_priv(dev);
150 const struct fs_platform_info *fpi = fep->fpi;
151
152 fep->fcc.idx = fs_get_fcc_index(fpi->fs_no);
153 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
154 return -EINVAL;
155
156 fep->fcc.mem = (void *)fpi->mem_offset;
157
158 if (do_pd_setup(fep) != 0)
159 return -EINVAL;
160
161 fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
162 fep->ev_rx = FCC_RX_EVENT;
163 fep->ev_tx = FCC_TX_EVENT;
164 fep->ev_err = FCC_ERR_EVENT_MSK;
165
166 return 0;
167}
168
169static int allocate_bd(struct net_device *dev)
170{
171 struct fs_enet_private *fep = netdev_priv(dev);
172 const struct fs_platform_info *fpi = fep->fpi;
173
174 fep->ring_base = dma_alloc_coherent(fep->dev,
175 (fpi->tx_ring + fpi->rx_ring) *
176 sizeof(cbd_t), &fep->ring_mem_addr,
177 GFP_KERNEL);
178 if (fep->ring_base == NULL)
179 return -ENOMEM;
180
181 return 0;
182}
183
184static void free_bd(struct net_device *dev)
185{
186 struct fs_enet_private *fep = netdev_priv(dev);
187 const struct fs_platform_info *fpi = fep->fpi;
188
189 if (fep->ring_base)
190 dma_free_coherent(fep->dev,
191 (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
192 fep->ring_base, fep->ring_mem_addr);
193}
194
195static void cleanup_data(struct net_device *dev)
196{
197 /* nothing */
198}
199
200static void set_promiscuous_mode(struct net_device *dev)
201{
202 struct fs_enet_private *fep = netdev_priv(dev);
203 fcc_t *fccp = fep->fcc.fccp;
204
205 S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
206}
207
208static void set_multicast_start(struct net_device *dev)
209{
210 struct fs_enet_private *fep = netdev_priv(dev);
211 fcc_enet_t *ep = fep->fcc.ep;
212
213 W32(ep, fen_gaddrh, 0);
214 W32(ep, fen_gaddrl, 0);
215}
216
217static void set_multicast_one(struct net_device *dev, const u8 *mac)
218{
219 struct fs_enet_private *fep = netdev_priv(dev);
220 fcc_enet_t *ep = fep->fcc.ep;
221 u16 taddrh, taddrm, taddrl;
222
223 taddrh = ((u16)mac[5] << 8) | mac[4];
224 taddrm = ((u16)mac[3] << 8) | mac[2];
225 taddrl = ((u16)mac[1] << 8) | mac[0];
226
227 W16(ep, fen_taddrh, taddrh);
228 W16(ep, fen_taddrm, taddrm);
229 W16(ep, fen_taddrl, taddrl);
230 fcc_cr_cmd(fep, 0x0C, CPM_CR_SET_GADDR);
231}
232
233static void set_multicast_finish(struct net_device *dev)
234{
235 struct fs_enet_private *fep = netdev_priv(dev);
236 fcc_t *fccp = fep->fcc.fccp;
237 fcc_enet_t *ep = fep->fcc.ep;
238
239 /* clear promiscuous always */
240 C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
241
242 /* if all multi or too many multicasts; just enable all */
243 if ((dev->flags & IFF_ALLMULTI) != 0 ||
244 dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
245
246 W32(ep, fen_gaddrh, 0xffffffff);
247 W32(ep, fen_gaddrl, 0xffffffff);
248 }
249
250 /* read back */
251 fep->fcc.gaddrh = R32(ep, fen_gaddrh);
252 fep->fcc.gaddrl = R32(ep, fen_gaddrl);
253}
254
255static void set_multicast_list(struct net_device *dev)
256{
257 struct dev_mc_list *pmc;
258
259 if ((dev->flags & IFF_PROMISC) == 0) {
260 set_multicast_start(dev);
261 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
262 set_multicast_one(dev, pmc->dmi_addr);
263 set_multicast_finish(dev);
264 } else
265 set_promiscuous_mode(dev);
266}
267
268static void restart(struct net_device *dev)
269{
270 struct fs_enet_private *fep = netdev_priv(dev);
271 const struct fs_platform_info *fpi = fep->fpi;
272 fcc_t *fccp = fep->fcc.fccp;
273 fcc_c_t *fcccp = fep->fcc.fcccp;
274 fcc_enet_t *ep = fep->fcc.ep;
275 dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
276 u16 paddrh, paddrm, paddrl;
277 u16 mem_addr;
278 const unsigned char *mac;
279 int i;
280
281 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
282
283 /* clear everything (slow & steady does it) */
284 for (i = 0; i < sizeof(*ep); i++)
285 __fcc_out8((char *)ep + i, 0);
286
287 /* get physical address */
288 rx_bd_base_phys = fep->ring_mem_addr;
289 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
290
291 /* point to bds */
292 W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
293 W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
294
295 /* Set maximum bytes per receive buffer.
296 * It must be a multiple of 32.
297 */
298 W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
299
300 W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
301 W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
302
303 /* Allocate space in the reserved FCC area of DPRAM for the
304 * internal buffers. No one uses this space (yet), so we
305 * can do this. Later, we will add resource management for
306 * this area.
307 */
308
309 mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */
310
311 W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff));
312 W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff));
313 W16(ep, fen_padptr, mem_addr + 64);
314
315 /* fill with special symbol... */
316 memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
317
318 W32(ep, fen_genfcc.fcc_rbptr, 0);
319 W32(ep, fen_genfcc.fcc_tbptr, 0);
320 W32(ep, fen_genfcc.fcc_rcrc, 0);
321 W32(ep, fen_genfcc.fcc_tcrc, 0);
322 W16(ep, fen_genfcc.fcc_res1, 0);
323 W32(ep, fen_genfcc.fcc_res2, 0);
324
325 /* no CAM */
326 W32(ep, fen_camptr, 0);
327
328 /* Set CRC preset and mask */
329 W32(ep, fen_cmask, 0xdebb20e3);
330 W32(ep, fen_cpres, 0xffffffff);
331
332 W32(ep, fen_crcec, 0); /* CRC Error counter */
333 W32(ep, fen_alec, 0); /* alignment error counter */
334 W32(ep, fen_disfc, 0); /* discard frame counter */
335 W16(ep, fen_retlim, 15); /* Retry limit threshold */
336 W16(ep, fen_pper, 0); /* Normal persistence */
337
338 /* set group address */
339 W32(ep, fen_gaddrh, fep->fcc.gaddrh);
340	W32(ep, fen_gaddrl, fep->fcc.gaddrl);
341
342 /* Clear hash filter tables */
343 W32(ep, fen_iaddrh, 0);
344 W32(ep, fen_iaddrl, 0);
345
346 /* Clear the Out-of-sequence TxBD */
347 W16(ep, fen_tfcstat, 0);
348 W16(ep, fen_tfclen, 0);
349 W32(ep, fen_tfcptr, 0);
350
351 W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
352 W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
353
354 /* set address */
355 mac = dev->dev_addr;
356 paddrh = ((u16)mac[5] << 8) | mac[4];
357 paddrm = ((u16)mac[3] << 8) | mac[2];
358 paddrl = ((u16)mac[1] << 8) | mac[0];
359
360 W16(ep, fen_paddrh, paddrh);
361 W16(ep, fen_paddrm, paddrm);
362 W16(ep, fen_paddrl, paddrl);
363
364 W16(ep, fen_taddrh, 0);
365 W16(ep, fen_taddrm, 0);
366 W16(ep, fen_taddrl, 0);
367
368 W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
369 W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
370
371 /* Clear stat counters, in case we ever enable RMON */
372 W32(ep, fen_octc, 0);
373 W32(ep, fen_colc, 0);
374 W32(ep, fen_broc, 0);
375 W32(ep, fen_mulc, 0);
376 W32(ep, fen_uspc, 0);
377 W32(ep, fen_frgc, 0);
378 W32(ep, fen_ospc, 0);
379 W32(ep, fen_jbrc, 0);
380 W32(ep, fen_p64c, 0);
381 W32(ep, fen_p65c, 0);
382 W32(ep, fen_p128c, 0);
383 W32(ep, fen_p256c, 0);
384 W32(ep, fen_p512c, 0);
385 W32(ep, fen_p1024c, 0);
386
387 W16(ep, fen_rfthr, 0); /* Suggested by manual */
388 W16(ep, fen_rfcnt, 0);
389 W16(ep, fen_cftype, 0);
390
391 fs_init_bds(dev);
392
393 /* adjust to speed (for RMII mode) */
394 if (fpi->use_rmii) {
395 if (fep->speed == 100)
396 C8(fcccp, fcc_gfemr, 0x20);
397 else
398 S8(fcccp, fcc_gfemr, 0x20);
399 }
400
401 fcc_cr_cmd(fep, 0x0c, CPM_CR_INIT_TRX);
402
403 /* clear events */
404 W16(fccp, fcc_fcce, 0xffff);
405
406 /* Enable interrupts we wish to service */
407 W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
408
409 /* Set GFMR to enable Ethernet operating mode */
410 W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
411
412 /* set sync/delimiters */
413 W16(fccp, fcc_fdsr, 0xd555);
414
415 W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
416
417 if (fpi->use_rmii)
418 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
419
420 /* adjust to duplex mode */
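	/* assumption: FCC full duplex needs LPB set along with FDE,
	 * which is why both bits are toggled together here */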
421 if (fep->duplex)
422 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
423 else
424 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
425
426 S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
427}
428
429static void stop(struct net_device *dev)
430{
431 struct fs_enet_private *fep = netdev_priv(dev);
432 fcc_t *fccp = fep->fcc.fccp;
433
434 /* stop ethernet */
435 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
436
437 /* clear events */
438 W16(fccp, fcc_fcce, 0xffff);
439
440 /* clear interrupt mask */
441 W16(fccp, fcc_fccm, 0);
442
443 fs_cleanup_bds(dev);
444}
445
446static void pre_request_irq(struct net_device *dev, int irq)
447{
448 /* nothing */
449}
450
451static void post_free_irq(struct net_device *dev, int irq)
452{
453 /* nothing */
454}
455
456static void napi_clear_rx_event(struct net_device *dev)
457{
458 struct fs_enet_private *fep = netdev_priv(dev);
459 fcc_t *fccp = fep->fcc.fccp;
460
461 W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
462}
463
464static void napi_enable_rx(struct net_device *dev)
465{
466 struct fs_enet_private *fep = netdev_priv(dev);
467 fcc_t *fccp = fep->fcc.fccp;
468
469 S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
470}
471
472static void napi_disable_rx(struct net_device *dev)
473{
474 struct fs_enet_private *fep = netdev_priv(dev);
475 fcc_t *fccp = fep->fcc.fccp;
476
477 C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
478}
479
480static void rx_bd_done(struct net_device *dev)
481{
482 /* nothing */
483}
484
485static void tx_kickstart(struct net_device *dev)
486{
487 /* nothing */
488}
489
490static u32 get_int_events(struct net_device *dev)
491{
492 struct fs_enet_private *fep = netdev_priv(dev);
493 fcc_t *fccp = fep->fcc.fccp;
494
495 return (u32)R16(fccp, fcc_fcce);
496}
497
498static void clear_int_events(struct net_device *dev, u32 int_events)
499{
500 struct fs_enet_private *fep = netdev_priv(dev);
501 fcc_t *fccp = fep->fcc.fccp;
502
503 W16(fccp, fcc_fcce, int_events & 0xffff);
504}
505
506static void ev_error(struct net_device *dev, u32 int_events)
507{
508 printk(KERN_WARNING DRV_MODULE_NAME
509 ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events);
510}
511
512static int get_regs(struct net_device *dev, void *p, int *sizep)
513{
514 struct fs_enet_private *fep = netdev_priv(dev);
515
516 if (*sizep < sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t))
517 return -EINVAL;
518
519 memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
520 p = (char *)p + sizeof(fcc_t);
521
522 memcpy_fromio(p, fep->fcc.fcccp, sizeof(fcc_c_t));
523 p = (char *)p + sizeof(fcc_c_t);
524
525 memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
526
527 return 0;
528}
529
530static int get_regs_len(struct net_device *dev)
531{
532 return sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t);
533}
534
535/* Some transmit errors cause the transmitter to shut
536 * down. We now issue a restart transmit. Since the
537 * errors close the BD and update the pointers, the restart
538 * _should_ pick up without having to reset any of our
539 * pointers either. Also, to work around 8260 device erratum
540 * CPM37, we must disable and then re-enable the transmitter
541 * following a Late Collision, Underrun, or Retry Limit error.
542 */
543static void tx_restart(struct net_device *dev)
544{
545 struct fs_enet_private *fep = netdev_priv(dev);
546 fcc_t *fccp = fep->fcc.fccp;
547
548 C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
549 udelay(10);
550 S32(fccp, fcc_gfmr, FCC_GFMR_ENT);
551
552 fcc_cr_cmd(fep, 0x0C, CPM_CR_RESTART_TX);
553}
554
555/*************************************************************************/
556
557const struct fs_ops fs_fcc_ops = {
558 .setup_data = setup_data,
559 .cleanup_data = cleanup_data,
560 .set_multicast_list = set_multicast_list,
561 .restart = restart,
562 .stop = stop,
563 .pre_request_irq = pre_request_irq,
564 .post_free_irq = post_free_irq,
565 .napi_clear_rx_event = napi_clear_rx_event,
566 .napi_enable_rx = napi_enable_rx,
567 .napi_disable_rx = napi_disable_rx,
568 .rx_bd_done = rx_bd_done,
569 .tx_kickstart = tx_kickstart,
570 .get_int_events = get_int_events,
571 .clear_int_events = clear_int_events,
572 .ev_error = ev_error,
573 .get_regs = get_regs,
574 .get_regs_len = get_regs_len,
575 .tx_restart = tx_restart,
576 .allocate_bd = allocate_bd,
577 .free_bd = free_bd,
578};
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
new file mode 100644
index 000000000000..5ef4e845a387
--- /dev/null
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -0,0 +1,653 @@
1/*
2 * Freescale Ethernet controllers
3 *
4 * Copyright (c) 2005 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#ifdef CONFIG_8xx
42#include <asm/8xx_immap.h>
43#include <asm/pgtable.h>
44#include <asm/mpc8xx.h>
45#include <asm/commproc.h>
46#endif
47
48#include "fs_enet.h"
49
50/*************************************************/
51
52#if defined(CONFIG_CPM1)
53/* for a CPM1 __raw_xxx's are sufficient */
54#define __fs_out32(addr, x) __raw_writel(x, addr)
55#define __fs_out16(addr, x) __raw_writew(x, addr)
56#define __fs_in32(addr) __raw_readl(addr)
57#define __fs_in16(addr) __raw_readw(addr)
58#else
59/* for others play it safe */
60#define __fs_out32(addr, x) out_be32(addr, x)
61#define __fs_out16(addr, x) out_be16(addr, x)
62#define __fs_in32(addr) in_be32(addr)
63#define __fs_in16(addr) in_be16(addr)
64#endif
65
66/* write */
67#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
68
69/* read */
70#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
71
72/* set bits */
73#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
74
75/* clear bits */
76#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
77
78
79/* CRC polynomial used by the FEC for multicast group filtering */
80#define FEC_CRC_POLY 0x04C11DB7
81
82#define FEC_MAX_MULTICAST_ADDRS 64
83
84/* Interrupt events/masks.
85*/
86#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
87#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
88#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
89#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
90#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
91#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
92#define FEC_ENET_RXF 0x02000000U /* Full frame received */
93#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
94#define FEC_ENET_MII 0x00800000U /* MII interrupt */
95#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
96
97#define FEC_ECNTRL_PINMUX 0x00000004
98#define FEC_ECNTRL_ETHER_EN 0x00000002
99#define FEC_ECNTRL_RESET 0x00000001
100
101#define FEC_RCNTRL_BC_REJ 0x00000010
102#define FEC_RCNTRL_PROM 0x00000008
103#define FEC_RCNTRL_MII_MODE 0x00000004
104#define FEC_RCNTRL_DRT 0x00000002
105#define FEC_RCNTRL_LOOP 0x00000001
106
107#define FEC_TCNTRL_FDEN 0x00000004
108#define FEC_TCNTRL_HBC 0x00000002
109#define FEC_TCNTRL_GTS 0x00000001
110
111
112/* Make MII read/write commands for the FEC.
113*/
114#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
115#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
116#define mk_mii_end 0
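/* e.g. mk_mii_read(MII_BMSR) == 0x60060000: start + read opcode with
   register 1 in bits 22:18; callers OR the PHY address into bits 27:23 */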
117
118#define FEC_MII_LOOPS 10000
119
120/*
121 * Delay to wait for FEC reset command to complete (in us)
122 */
123#define FEC_RESET_DELAY 50
124
125static int whack_reset(fec_t * fecp)
126{
127 int i;
128
129 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
130 for (i = 0; i < FEC_RESET_DELAY; i++) {
131 if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
132 return 0; /* OK */
133 udelay(1);
134 }
135
136 return -1;
137}
138
139static int do_pd_setup(struct fs_enet_private *fep)
140{
141 struct platform_device *pdev = to_platform_device(fep->dev);
142 struct resource *r;
143
144 /* Fill out IRQ field */
145	fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
146
147	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
148	if (r == NULL)
149		return -EINVAL;
150	fep->fec.fecp = (void *)r->start;
151
152	return 0;
153}
156
157#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
158#define FEC_RX_EVENT (FEC_ENET_RXF)
159#define FEC_TX_EVENT (FEC_ENET_TXF)
160#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
161 FEC_ENET_BABT | FEC_ENET_EBERR)
162
163static int setup_data(struct net_device *dev)
164{
165 struct fs_enet_private *fep = netdev_priv(dev);
166
167 if (do_pd_setup(fep) != 0)
168 return -EINVAL;
169
170 fep->fec.hthi = 0;
171 fep->fec.htlo = 0;
172
173 fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
174 fep->ev_rx = FEC_RX_EVENT;
175 fep->ev_tx = FEC_TX_EVENT;
176 fep->ev_err = FEC_ERR_EVENT_MSK;
177
178 return 0;
179}
180
181static int allocate_bd(struct net_device *dev)
182{
183 struct fs_enet_private *fep = netdev_priv(dev);
184 const struct fs_platform_info *fpi = fep->fpi;
185
186 fep->ring_base = dma_alloc_coherent(fep->dev,
187 (fpi->tx_ring + fpi->rx_ring) *
188 sizeof(cbd_t), &fep->ring_mem_addr,
189 GFP_KERNEL);
190 if (fep->ring_base == NULL)
191 return -ENOMEM;
192
193 return 0;
194}
195
196static void free_bd(struct net_device *dev)
197{
198 struct fs_enet_private *fep = netdev_priv(dev);
199 const struct fs_platform_info *fpi = fep->fpi;
200
201	if (fep->ring_base)
202 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
203 * sizeof(cbd_t),
204 fep->ring_base,
205 fep->ring_mem_addr);
206}
207
208static void cleanup_data(struct net_device *dev)
209{
210 /* nothing */
211}
212
213static void set_promiscuous_mode(struct net_device *dev)
214{
215 struct fs_enet_private *fep = netdev_priv(dev);
216 fec_t *fecp = fep->fec.fecp;
217
218 FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
219}
220
221static void set_multicast_start(struct net_device *dev)
222{
223 struct fs_enet_private *fep = netdev_priv(dev);
224
225 fep->fec.hthi = 0;
226 fep->fec.htlo = 0;
227}
228
229static void set_multicast_one(struct net_device *dev, const u8 *mac)
230{
231 struct fs_enet_private *fep = netdev_priv(dev);
232 int temp, hash_index, i, j;
233 u32 crc, csrVal;
234 u8 byte, msb;
235
236 crc = 0xffffffff;
237 for (i = 0; i < 6; i++) {
238 byte = mac[i];
239 for (j = 0; j < 8; j++) {
240 msb = crc >> 31;
241 crc <<= 1;
242 if (msb ^ (byte & 0x1))
243 crc ^= FEC_CRC_POLY;
244 byte >>= 1;
245 }
246 }
247
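	/* form the hash index from CRC bits [5:1], bit-reversed; CRC bit 0
	 * then selects the high (hthi) or low (htlo) 32-bit table half */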
248 temp = (crc & 0x3f) >> 1;
249 hash_index = ((temp & 0x01) << 4) |
250 ((temp & 0x02) << 2) |
251 ((temp & 0x04)) |
252 ((temp & 0x08) >> 2) |
253 ((temp & 0x10) >> 4);
254 csrVal = 1 << hash_index;
255 if (crc & 1)
256 fep->fec.hthi |= csrVal;
257 else
258 fep->fec.htlo |= csrVal;
259}
260
261static void set_multicast_finish(struct net_device *dev)
262{
263 struct fs_enet_private *fep = netdev_priv(dev);
264 fec_t *fecp = fep->fec.fecp;
265
266 /* if all multi or too many multicasts; just enable all */
267 if ((dev->flags & IFF_ALLMULTI) != 0 ||
268 dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
269 fep->fec.hthi = 0xffffffffU;
270 fep->fec.htlo = 0xffffffffU;
271 }
272
273 FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
274 FW(fecp, hash_table_high, fep->fec.hthi);
275 FW(fecp, hash_table_low, fep->fec.htlo);
276}
277
278static void set_multicast_list(struct net_device *dev)
279{
280 struct dev_mc_list *pmc;
281
282 if ((dev->flags & IFF_PROMISC) == 0) {
283 set_multicast_start(dev);
284 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
285 set_multicast_one(dev, pmc->dmi_addr);
286 set_multicast_finish(dev);
287 } else
288 set_promiscuous_mode(dev);
289}
290
291static void restart(struct net_device *dev)
292{
293#ifdef CONFIG_DUET
294 immap_t *immap = fs_enet_immap;
295 u32 cptr;
296#endif
297 struct fs_enet_private *fep = netdev_priv(dev);
298 fec_t *fecp = fep->fec.fecp;
299 const struct fs_platform_info *fpi = fep->fpi;
300 dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
301 int r;
302 u32 addrhi, addrlo;
303
304 r = whack_reset(fep->fec.fecp);
305 if (r != 0)
306 printk(KERN_ERR DRV_MODULE_NAME
307 ": %s FEC Reset FAILED!\n", dev->name);
308
309 /*
310 * Set station address.
311 */
312 addrhi = ((u32) dev->dev_addr[0] << 24) |
313 ((u32) dev->dev_addr[1] << 16) |
314 ((u32) dev->dev_addr[2] << 8) |
315 (u32) dev->dev_addr[3];
316 addrlo = ((u32) dev->dev_addr[4] << 24) |
317 ((u32) dev->dev_addr[5] << 16);
318 FW(fecp, addr_low, addrhi);
319 FW(fecp, addr_high, addrlo);
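	/* NB: addr_low holds the first four octets and addr_high the last
	 * two, so feeding addrhi/addrlo this way round is intentional
	 * (assumed from the FEC register layout) */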
320
321 /*
322 * Reset all multicast.
323 */
324 FW(fecp, hash_table_high, fep->fec.hthi);
325 FW(fecp, hash_table_low, fep->fec.htlo);
326
327 /*
328 * Set maximum receive buffer size.
329 */
330 FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
331 FW(fecp, r_hash, PKT_MAXBUF_SIZE);
332
333 /* get physical address */
334 rx_bd_base_phys = fep->ring_mem_addr;
335 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
336
337 /*
338 * Set receive and transmit descriptor base.
339 */
340 FW(fecp, r_des_start, rx_bd_base_phys);
341 FW(fecp, x_des_start, tx_bd_base_phys);
342
343 fs_init_bds(dev);
344
345 /*
346 * Enable big endian and don't care about SDMA FC.
347 */
348 FW(fecp, fun_code, 0x78000000);
349
350 /*
351 * Set MII speed.
352 */
353 FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed);
354
355 /*
356 * Clear any outstanding interrupt.
357 */
358 FW(fecp, ievent, 0xffc0);
359 FW(fecp, ivec, (fep->interrupt / 2) << 29);
360
361
362 /*
363 * adjust to speed (only for DUET & RMII)
364 */
365#ifdef CONFIG_DUET
366 if (fpi->use_rmii) {
367 cptr = in_be32(&immap->im_cpm.cp_cptr);
368 switch (fs_get_fec_index(fpi->fs_no)) {
369 case 0:
370 cptr |= 0x100;
371 if (fep->speed == 10)
372 cptr |= 0x0000010;
373 else if (fep->speed == 100)
374 cptr &= ~0x0000010;
375 break;
376 case 1:
377 cptr |= 0x80;
378 if (fep->speed == 10)
379 cptr |= 0x0000008;
380 else if (fep->speed == 100)
381 cptr &= ~0x0000008;
382 break;
383 default:
384 BUG(); /* should never happen */
385 break;
386 }
387 out_be32(&immap->im_cpm.cp_cptr, cptr);
388 }
389#endif
390
391 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
392 /*
393 * adjust to duplex mode
394 */
395 if (fep->duplex) {
396 FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
397 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
398 } else {
399 FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
400 FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
401 }
402
403 /*
404 * Enable interrupts we wish to service.
405 */
406 FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
407 FEC_ENET_RXF | FEC_ENET_RXB);
408
409 /*
410 * And last, enable the transmit and receive processing.
411 */
412 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
413 FW(fecp, r_des_active, 0x01000000);
414}
415
416static void stop(struct net_device *dev)
417{
418 struct fs_enet_private *fep = netdev_priv(dev);
419 fec_t *fecp = fep->fec.fecp;
420 struct fs_enet_mii_bus *bus = fep->mii_bus;
421 const struct fs_mii_bus_info *bi = bus->bus_info;
422 int i;
423
424 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
425 return; /* already down */
426
427 FW(fecp, x_cntrl, 0x01); /* Graceful transmit stop */
428 for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
429 i < FEC_RESET_DELAY; i++)
430 udelay(1);
431
432 if (i == FEC_RESET_DELAY)
433 printk(KERN_WARNING DRV_MODULE_NAME
434 ": %s FEC timeout on graceful transmit stop\n",
435 dev->name);
436	/*
437	 * Disable the FEC; MII-only operation is restored below if needed.
438	 */
439 FW(fecp, imask, 0);
440 FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);
441
442 fs_cleanup_bds(dev);
443
444	/* if this is FEC1 and others still use its shared MII bus, keep MII alive */
445 if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) {
446 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
447 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
448 FW(fecp, ievent, FEC_ENET_MII);
449 FW(fecp, mii_speed, bus->fec.mii_speed);
450 }
451}
452
453static void pre_request_irq(struct net_device *dev, int irq)
454{
455 immap_t *immap = fs_enet_immap;
456 u32 siel;
457
458 /* SIU interrupt */
459 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
460
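		/* assumption: each SIEL bit picks edge (1) vs level (0)
		 * triggering for an SIU source; the external IRQx lines are
		 * the even-numbered sources, hence the (irq & 1) test */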
461 siel = in_be32(&immap->im_siu_conf.sc_siel);
462 if ((irq & 1) == 0)
463 siel |= (0x80000000 >> irq);
464 else
465 siel &= ~(0x80000000 >> (irq & ~1));
466 out_be32(&immap->im_siu_conf.sc_siel, siel);
467 }
468}
469
470static void post_free_irq(struct net_device *dev, int irq)
471{
472 /* nothing */
473}
474
475static void napi_clear_rx_event(struct net_device *dev)
476{
477 struct fs_enet_private *fep = netdev_priv(dev);
478 fec_t *fecp = fep->fec.fecp;
479
480 FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
481}
482
483static void napi_enable_rx(struct net_device *dev)
484{
485 struct fs_enet_private *fep = netdev_priv(dev);
486 fec_t *fecp = fep->fec.fecp;
487
488 FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
489}
490
491static void napi_disable_rx(struct net_device *dev)
492{
493 struct fs_enet_private *fep = netdev_priv(dev);
494 fec_t *fecp = fep->fec.fecp;
495
496 FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
497}
498
499static void rx_bd_done(struct net_device *dev)
500{
501 struct fs_enet_private *fep = netdev_priv(dev);
502 fec_t *fecp = fep->fec.fecp;
503
504 FW(fecp, r_des_active, 0x01000000);
505}
506
507static void tx_kickstart(struct net_device *dev)
508{
509 struct fs_enet_private *fep = netdev_priv(dev);
510 fec_t *fecp = fep->fec.fecp;
511
512 FW(fecp, x_des_active, 0x01000000);
513}
514
515static u32 get_int_events(struct net_device *dev)
516{
517 struct fs_enet_private *fep = netdev_priv(dev);
518 fec_t *fecp = fep->fec.fecp;
519
520 return FR(fecp, ievent) & FR(fecp, imask);
521}
522
523static void clear_int_events(struct net_device *dev, u32 int_events)
524{
525 struct fs_enet_private *fep = netdev_priv(dev);
526 fec_t *fecp = fep->fec.fecp;
527
528 FW(fecp, ievent, int_events);
529}
530
531static void ev_error(struct net_device *dev, u32 int_events)
532{
533 printk(KERN_WARNING DRV_MODULE_NAME
534 ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events);
535}
536
537static int get_regs(struct net_device *dev, void *p, int *sizep)
538{
539 struct fs_enet_private *fep = netdev_priv(dev);
540
541 if (*sizep < sizeof(fec_t))
542 return -EINVAL;
543
544 memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t));
545
546 return 0;
547}
548
549static int get_regs_len(struct net_device *dev)
550{
551 return sizeof(fec_t);
552}
553
554static void tx_restart(struct net_device *dev)
555{
556 /* nothing */
557}
558
559/*************************************************************************/
560
561const struct fs_ops fs_fec_ops = {
562 .setup_data = setup_data,
563 .cleanup_data = cleanup_data,
564 .set_multicast_list = set_multicast_list,
565 .restart = restart,
566 .stop = stop,
567 .pre_request_irq = pre_request_irq,
568 .post_free_irq = post_free_irq,
569 .napi_clear_rx_event = napi_clear_rx_event,
570 .napi_enable_rx = napi_enable_rx,
571 .napi_disable_rx = napi_disable_rx,
572 .rx_bd_done = rx_bd_done,
573 .tx_kickstart = tx_kickstart,
574 .get_int_events = get_int_events,
575 .clear_int_events = clear_int_events,
576 .ev_error = ev_error,
577 .get_regs = get_regs,
578 .get_regs_len = get_regs_len,
579 .tx_restart = tx_restart,
580 .allocate_bd = allocate_bd,
581 .free_bd = free_bd,
582};
583
584/***********************************************************************/
585
586static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
587{
588 fec_t *fecp = bus->fec.fecp;
589 int i, ret = -1;
590
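	/* this must never happen: MII mode must already be enabled */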
591 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
592 BUG();
593
594 /* Add PHY address to register command. */
595 FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
596
597 for (i = 0; i < FEC_MII_LOOPS; i++)
598 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
599 break;
600
601 if (i < FEC_MII_LOOPS) {
602 FW(fecp, ievent, FEC_ENET_MII);
603 ret = FR(fecp, mii_data) & 0xffff;
604 }
605
606 return ret;
607}
608
609static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
610{
611 fec_t *fecp = bus->fec.fecp;
612 int i;
613
614 /* this must never happen */
615 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
616 BUG();
617
618 /* Add PHY address to register command. */
619 FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
620
621 for (i = 0; i < FEC_MII_LOOPS; i++)
622 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
623 break;
624
625 if (i < FEC_MII_LOOPS)
626 FW(fecp, ievent, FEC_ENET_MII);
627}
628
629int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
630{
631 bd_t *bd = (bd_t *)__res;
632 const struct fs_mii_bus_info *bi = bus->bus_info;
633 fec_t *fecp;
634
635 if (bi->id != 0)
636 return -1;
637
638 bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
639 bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
640 & 0x3F) << 1;
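	/* illustrative arithmetic (assuming bi_intfreq == 50 MHz):
	 * (50000000 + 4999999) / 2500000 = 21, / 2 = 10, << 1 = 20,
	 * i.e. MII_SPEED = 10 for MDC = 50 MHz / (2 * 10) = 2.5 MHz */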
641
642 fecp = bus->fec.fecp;
643
644 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
645 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
646 FW(fecp, ievent, FEC_ENET_MII);
647 FW(fecp, mii_speed, bus->fec.mii_speed);
648
649 bus->mii_read = mii_read;
650 bus->mii_write = mii_write;
651
652 return 0;
653}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
new file mode 100644
index 000000000000..d8c6e9cadcf5
--- /dev/null
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -0,0 +1,524 @@
1/*
2 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#ifdef CONFIG_8xx
42#include <asm/8xx_immap.h>
43#include <asm/pgtable.h>
44#include <asm/mpc8xx.h>
45#include <asm/commproc.h>
46#endif
47
48#include "fs_enet.h"
49
50/*************************************************/
51
52#if defined(CONFIG_CPM1)
53/* for an 8xx the __raw_xxx accessors are sufficient */
54#define __fs_out32(addr, x) __raw_writel(x, addr)
55#define __fs_out16(addr, x) __raw_writew(x, addr)
56#define __fs_out8(addr, x) __raw_writeb(x, addr)
57#define __fs_in32(addr) __raw_readl(addr)
58#define __fs_in16(addr) __raw_readw(addr)
59#define __fs_in8(addr) __raw_readb(addr)
60#else
61/* for others play it safe */
62#define __fs_out32(addr, x)	out_be32(addr, x)
63#define __fs_out16(addr, x)	out_be16(addr, x)
64#define __fs_out8(addr, x)	out_8(addr, x)
65#define __fs_in32(addr)	in_be32(addr)
66#define __fs_in16(addr)	in_be16(addr)
67#define __fs_in8(addr)	in_8(addr)
68#endif
67
68/* write, read, set bits, clear bits */
69#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
70#define R32(_p, _m) __fs_in32(&(_p)->_m)
71#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
72#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
73
74#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
75#define R16(_p, _m) __fs_in16(&(_p)->_m)
76#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
77#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
78
79#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
80#define R8(_p, _m) __fs_in8(&(_p)->_m)
81#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
82#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
83
84#define SCC_MAX_MULTICAST_ADDRS 64
85
86/*
87 * Delay to wait for SCC reset command to complete (in us)
88 */
89#define SCC_RESET_DELAY 50
90#define MAX_CR_CMD_LOOPS 10000
91
92static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
93{
94 cpm8xx_t *cpmp = &((immap_t *)fs_enet_immap)->im_cpm;
95 u32 v, ch;
96 int i = 0;
97
98 ch = fep->scc.idx << 2;
99 v = mk_cr_cmd(ch, op);
100 W16(cpmp, cp_cpcr, v | CPM_CR_FLG);
101 for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
102 if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
103 break;
104
105 if (i >= MAX_CR_CMD_LOOPS) {
106		printk(KERN_ERR "%s(): Unable to issue CPM command\n",
107 __FUNCTION__);
108 return 1;
109 }
110 return 0;
111}
112
113static int do_pd_setup(struct fs_enet_private *fep)
114{
115 struct platform_device *pdev = to_platform_device(fep->dev);
116 struct resource *r;
117
118 /* Fill out IRQ field */
119 fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
120
121	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
122	if (r == NULL)
123		return -EINVAL;
124	fep->scc.sccp = (void *)r->start;
125
126	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
127	if (r == NULL)
128		return -EINVAL;
129	fep->scc.ep = (void *)r->start;
130
133 return 0;
134}
135
136#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
137#define SCC_RX_EVENT (SCCE_ENET_RXF)
138#define SCC_TX_EVENT (SCCE_ENET_TXB)
139#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
140
141static int setup_data(struct net_device *dev)
142{
143 struct fs_enet_private *fep = netdev_priv(dev);
144 const struct fs_platform_info *fpi = fep->fpi;
145
146 fep->scc.idx = fs_get_scc_index(fpi->fs_no);
147	if ((unsigned int)fep->scc.idx >= 4) /* max 4 SCCs */
148 return -EINVAL;
149
150	if (do_pd_setup(fep) != 0)
151		return -EINVAL;
152 fep->scc.hthi = 0;
153 fep->scc.htlo = 0;
154
155 fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
156 fep->ev_rx = SCC_RX_EVENT;
157 fep->ev_tx = SCC_TX_EVENT;
158 fep->ev_err = SCC_ERR_EVENT_MSK;
159
160 return 0;
161}
162
163static int allocate_bd(struct net_device *dev)
164{
165 struct fs_enet_private *fep = netdev_priv(dev);
166 const struct fs_platform_info *fpi = fep->fpi;
167
168 fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
169 sizeof(cbd_t), 8);
170 if (IS_DPERR(fep->ring_mem_addr))
171 return -ENOMEM;
172
173 fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr);
174
175 return 0;
176}
177
178static void free_bd(struct net_device *dev)
179{
180 struct fs_enet_private *fep = netdev_priv(dev);
181
182 if (fep->ring_base)
183 cpm_dpfree(fep->ring_mem_addr);
184}
185
186static void cleanup_data(struct net_device *dev)
187{
188 /* nothing */
189}
190
191static void set_promiscuous_mode(struct net_device *dev)
192{
193 struct fs_enet_private *fep = netdev_priv(dev);
194 scc_t *sccp = fep->scc.sccp;
195
196 S16(sccp, scc_psmr, SCC_PSMR_PRO);
197}
198
199static void set_multicast_start(struct net_device *dev)
200{
201 struct fs_enet_private *fep = netdev_priv(dev);
202 scc_enet_t *ep = fep->scc.ep;
203
204 W16(ep, sen_gaddr1, 0);
205 W16(ep, sen_gaddr2, 0);
206 W16(ep, sen_gaddr3, 0);
207 W16(ep, sen_gaddr4, 0);
208}
209
210static void set_multicast_one(struct net_device *dev, const u8 * mac)
211{
212 struct fs_enet_private *fep = netdev_priv(dev);
213 scc_enet_t *ep = fep->scc.ep;
214 u16 taddrh, taddrm, taddrl;
215
216 taddrh = ((u16) mac[5] << 8) | mac[4];
217 taddrm = ((u16) mac[3] << 8) | mac[2];
218 taddrl = ((u16) mac[1] << 8) | mac[0];
219
220 W16(ep, sen_taddrh, taddrh);
221 W16(ep, sen_taddrm, taddrm);
222 W16(ep, sen_taddrl, taddrl);
223 scc_cr_cmd(fep, CPM_CR_SET_GADDR);
224}
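/* Note: per the MPC8xx CPM documentation, CPM_CR_SET_GADDR makes the CPM
 * microcode hash the address just written to sen_taddr{l,m,h} and set the
 * matching bit in the sen_gaddr1..4 group address filter, so no hash
 * computation is needed on the host side.  The address bytes are packed
 * low-to-high: mac[0] lands in the low byte of sen_taddrl.
 */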
225
226static void set_multicast_finish(struct net_device *dev)
227{
228 struct fs_enet_private *fep = netdev_priv(dev);
229 scc_t *sccp = fep->scc.sccp;
230 scc_enet_t *ep = fep->scc.ep;
231
232 /* clear promiscuous always */
233 C16(sccp, scc_psmr, SCC_PSMR_PRO);
234
235 /* if all multi or too many multicasts; just enable all */
236 if ((dev->flags & IFF_ALLMULTI) != 0 ||
237 dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
238
239 W16(ep, sen_gaddr1, 0xffff);
240 W16(ep, sen_gaddr2, 0xffff);
241 W16(ep, sen_gaddr3, 0xffff);
242 W16(ep, sen_gaddr4, 0xffff);
243 }
244}
245
246static void set_multicast_list(struct net_device *dev)
247{
248 struct dev_mc_list *pmc;
249
250 if ((dev->flags & IFF_PROMISC) == 0) {
251 set_multicast_start(dev);
252 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
253 set_multicast_one(dev, pmc->dmi_addr);
254 set_multicast_finish(dev);
255 } else
256 set_promiscuous_mode(dev);
257}
258
259/*
260 * This function is called to start or restart the SCC during a link
261 * change.  This only happens when switching between half and full
262 * duplex.
263 */
264static void restart(struct net_device *dev)
265{
266 struct fs_enet_private *fep = netdev_priv(dev);
267 scc_t *sccp = fep->scc.sccp;
268 scc_enet_t *ep = fep->scc.ep;
269 const struct fs_platform_info *fpi = fep->fpi;
270 u16 paddrh, paddrm, paddrl;
271 const unsigned char *mac;
272 int i;
273
274 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
275
276 /* clear everything (slow & steady does it) */
277 for (i = 0; i < sizeof(*ep); i++)
278 __fs_out8((char *)ep + i, 0);
279
280 /* point to bds */
281 W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
282 W16(ep, sen_genscc.scc_tbase,
283 fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);
284
285 /* Initialize function code registers for big-endian.
286 */
287 W8(ep, sen_genscc.scc_rfcr, SCC_EB);
288 W8(ep, sen_genscc.scc_tfcr, SCC_EB);
289
290 /* Set maximum bytes per receive buffer.
291 * This appears to be an Ethernet frame size, not the buffer
292 * fragment size. It must be a multiple of four.
293 */
294 W16(ep, sen_genscc.scc_mrblr, 0x5f0);
295
296 /* Set CRC preset and mask.
297 */
298 W32(ep, sen_cpres, 0xffffffff);
299 W32(ep, sen_cmask, 0xdebb20e3);
300
301 W32(ep, sen_crcec, 0); /* CRC Error counter */
302 W32(ep, sen_alec, 0); /* alignment error counter */
303 W32(ep, sen_disfc, 0); /* discard frame counter */
304
305 W16(ep, sen_pads, 0x8888); /* Tx short frame pad character */
306 W16(ep, sen_retlim, 15); /* Retry limit threshold */
307
308 W16(ep, sen_maxflr, 0x5ee); /* maximum frame length register */
309
310 W16(ep, sen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
311
312 W16(ep, sen_maxd1, 0x000005f0); /* maximum DMA1 length */
313 W16(ep, sen_maxd2, 0x000005f0); /* maximum DMA2 length */
314
315 /* Clear hash tables.
316 */
317 W16(ep, sen_gaddr1, 0);
318 W16(ep, sen_gaddr2, 0);
319 W16(ep, sen_gaddr3, 0);
320 W16(ep, sen_gaddr4, 0);
321 W16(ep, sen_iaddr1, 0);
322 W16(ep, sen_iaddr2, 0);
323 W16(ep, sen_iaddr3, 0);
324 W16(ep, sen_iaddr4, 0);
325
326 /* set address
327 */
328 mac = dev->dev_addr;
329 paddrh = ((u16) mac[5] << 8) | mac[4];
330 paddrm = ((u16) mac[3] << 8) | mac[2];
331 paddrl = ((u16) mac[1] << 8) | mac[0];
332
333 W16(ep, sen_paddrh, paddrh);
334 W16(ep, sen_paddrm, paddrm);
335 W16(ep, sen_paddrl, paddrl);
336
337 W16(ep, sen_pper, 0);
338 W16(ep, sen_taddrl, 0);
339 W16(ep, sen_taddrm, 0);
340 W16(ep, sen_taddrh, 0);
341
342 fs_init_bds(dev);
343
344 scc_cr_cmd(fep, CPM_CR_INIT_TRX);
345
346 W16(sccp, scc_scce, 0xffff);
347
348 /* Enable interrupts we wish to service.
349 */
350 W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
351
352 /* Set GSMR_H to enable all normal operating modes.
353 * Set GSMR_L to enable Ethernet to MC68160.
354 */
355 W32(sccp, scc_gsmrh, 0);
356 W32(sccp, scc_gsmrl,
357 SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
358 SCC_GSMRL_MODE_ENET);
359
360 /* Set sync/delimiters.
361 */
362 W16(sccp, scc_dsr, 0xd555);
363
364 /* Set processing mode. Use Ethernet CRC, catch broadcast, and
365 * start frame search 22 bit times after RENA.
366 */
367 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
368
369 /* Set full duplex mode if needed */
370 if (fep->duplex)
371 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
372
373 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
374}
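/* For reference, the bring-up sequence above is: disable the receiver and
 * transmitter (ENR/ENT off), zero the entire parameter RAM, point the BD
 * rings into DPRAM, program the Ethernet parameter RAM fields (function
 * codes, CRC preset/mask, counters, frame-length limits, station and hash
 * addresses), issue CPM_CR_INIT_TRX, clear and unmask the events of
 * interest, program GSMR/DSR/PSMR, and only then re-enable ENR/ENT.
 */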
375
376static void stop(struct net_device *dev)
377{
378 struct fs_enet_private *fep = netdev_priv(dev);
379 scc_t *sccp = fep->scc.sccp;
380 int i;
381
382 for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
383 udelay(1);
384
385 if (i == SCC_RESET_DELAY)
386 printk(KERN_WARNING DRV_MODULE_NAME
387 ": %s SCC timeout on graceful transmit stop\n",
388 dev->name);
389
390 W16(sccp, scc_sccm, 0);
391 C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
392
393 fs_cleanup_bds(dev);
394}
395
396static void pre_request_irq(struct net_device *dev, int irq)
397{
398 immap_t *immap = fs_enet_immap;
399 u32 siel;
400
401 /* SIU interrupt */
402 if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {
403
404 siel = in_be32(&immap->im_siu_conf.sc_siel);
405 if ((irq & 1) == 0)
406 siel |= (0x80000000 >> irq);
407 else
408 siel &= ~(0x80000000 >> (irq & ~1));
409 out_be32(&immap->im_siu_conf.sc_siel, siel);
410 }
411}
412
413static void post_free_irq(struct net_device *dev, int irq)
414{
415 /* nothing */
416}
417
418static void napi_clear_rx_event(struct net_device *dev)
419{
420 struct fs_enet_private *fep = netdev_priv(dev);
421 scc_t *sccp = fep->scc.sccp;
422
423 W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
424}
425
426static void napi_enable_rx(struct net_device *dev)
427{
428 struct fs_enet_private *fep = netdev_priv(dev);
429 scc_t *sccp = fep->scc.sccp;
430
431 S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
432}
433
434static void napi_disable_rx(struct net_device *dev)
435{
436 struct fs_enet_private *fep = netdev_priv(dev);
437 scc_t *sccp = fep->scc.sccp;
438
439 C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
440}
441
442static void rx_bd_done(struct net_device *dev)
443{
444 /* nothing */
445}
446
447static void tx_kickstart(struct net_device *dev)
448{
449 /* nothing */
450}
451
452static u32 get_int_events(struct net_device *dev)
453{
454 struct fs_enet_private *fep = netdev_priv(dev);
455 scc_t *sccp = fep->scc.sccp;
456
457 return (u32) R16(sccp, scc_scce);
458}
459
460static void clear_int_events(struct net_device *dev, u32 int_events)
461{
462 struct fs_enet_private *fep = netdev_priv(dev);
463 scc_t *sccp = fep->scc.sccp;
464
465 W16(sccp, scc_scce, int_events & 0xffff);
466}
467
468static void ev_error(struct net_device *dev, u32 int_events)
469{
470 printk(KERN_WARNING DRV_MODULE_NAME
471 ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
472}
473
474static int get_regs(struct net_device *dev, void *p, int *sizep)
475{
476 struct fs_enet_private *fep = netdev_priv(dev);
477
478 if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t))
479 return -EINVAL;
480
481 memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
482 p = (char *)p + sizeof(scc_t);
483
484 memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t));
485
486 return 0;
487}
488
489static int get_regs_len(struct net_device *dev)
490{
491 return sizeof(scc_t) + sizeof(scc_enet_t);
492}
493
494static void tx_restart(struct net_device *dev)
495{
496 struct fs_enet_private *fep = netdev_priv(dev);
497
498 scc_cr_cmd(fep, CPM_CR_RESTART_TX);
499}
500
501/*************************************************************************/
502
503const struct fs_ops fs_scc_ops = {
504 .setup_data = setup_data,
505 .cleanup_data = cleanup_data,
506 .set_multicast_list = set_multicast_list,
507 .restart = restart,
508 .stop = stop,
509 .pre_request_irq = pre_request_irq,
510 .post_free_irq = post_free_irq,
511 .napi_clear_rx_event = napi_clear_rx_event,
512 .napi_enable_rx = napi_enable_rx,
513 .napi_disable_rx = napi_disable_rx,
514 .rx_bd_done = rx_bd_done,
515 .tx_kickstart = tx_kickstart,
516 .get_int_events = get_int_events,
517 .clear_int_events = clear_int_events,
518 .ev_error = ev_error,
519 .get_regs = get_regs,
520 .get_regs_len = get_regs_len,
521 .tx_restart = tx_restart,
522 .allocate_bd = allocate_bd,
523 .free_bd = free_bd,
524};
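Between the register accessors at the top of this file and the fs_scc_ops table above, the one non-obvious idiom is the CPM command handshake in scc_cr_cmd(): write the opcode with CPM_CR_FLG set, then poll until the CPM clears the flag, with a bounded loop so a wedged controller cannot hang the kernel. Below is a minimal stand-alone model of that handshake; the register and the hardware's flag-clearing are faked in plain C, and every name here is illustrative rather than kernel API:

/* Stand-alone model of the CPM command handshake used above. */
#include <stdint.h>
#include <stdio.h>

#define CPM_CR_FLG	0x0001	/* "command pending" flag, as in the driver */
#define MAX_CR_CMD_LOOPS 10000

static volatile uint16_t fake_cpcr;	/* stands in for cpmp->cp_cpcr */

/* The real hardware clears CPM_CR_FLG itself; emulate that here. */
static uint16_t read_cpcr(void)
{
	static int countdown = 5;
	if (countdown && --countdown == 0)
		fake_cpcr &= ~CPM_CR_FLG;
	return fake_cpcr;
}

static int issue_cr_cmd(uint16_t cmd)
{
	int i;

	fake_cpcr = cmd | CPM_CR_FLG;		/* write command + flag, like W16() */
	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
		if ((read_cpcr() & CPM_CR_FLG) == 0)
			break;			/* controller consumed the command */

	return i < MAX_CR_CMD_LOOPS ? 0 : 1;	/* 1 on timeout, as in the driver */
}

int main(void)
{
	printf("cmd %s\n", issue_cr_cmd(0x1234) ? "timed out" : "accepted");
	return 0;
}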
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
new file mode 100644
index 000000000000..24a5e2e23d18
--- /dev/null
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -0,0 +1,405 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37
38#include <asm/pgtable.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42#include "fs_enet.h"
43
44#ifdef CONFIG_8xx
45static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
46{
47 immap_t *im = (immap_t *)fs_enet_immap;
48 void *dir, *dat, *ppar;
49 int adv;
50 u8 msk;
51
52 switch (port) {
53 case fsiop_porta:
54 dir = &im->im_ioport.iop_padir;
55 dat = &im->im_ioport.iop_padat;
56 ppar = &im->im_ioport.iop_papar;
57 break;
58
59 case fsiop_portb:
60 dir = &im->im_cpm.cp_pbdir;
61 dat = &im->im_cpm.cp_pbdat;
62 ppar = &im->im_cpm.cp_pbpar;
63 break;
64
65 case fsiop_portc:
66 dir = &im->im_ioport.iop_pcdir;
67 dat = &im->im_ioport.iop_pcdat;
68 ppar = &im->im_ioport.iop_pcpar;
69 break;
70
71 case fsiop_portd:
72 dir = &im->im_ioport.iop_pddir;
73 dat = &im->im_ioport.iop_pddat;
74 ppar = &im->im_ioport.iop_pdpar;
75 break;
76
77 case fsiop_porte:
78 dir = &im->im_cpm.cp_pedir;
79 dat = &im->im_cpm.cp_pedat;
80 ppar = &im->im_cpm.cp_pepar;
81 break;
82
83 default:
84 printk(KERN_ERR DRV_MODULE_NAME
85		       ": Illegal port value %d!\n", port);
86 return -EINVAL;
87 }
88
89 adv = bit >> 3;
90 dir = (char *)dir + adv;
91 dat = (char *)dat + adv;
92 ppar = (char *)ppar + adv;
93
94 msk = 1 << (7 - (bit & 7));
95 if ((in_8(ppar) & msk) != 0) {
96 printk(KERN_ERR DRV_MODULE_NAME
97		       ": pin %d on port %d is not general purpose!\n", bit, port);
98 return -EINVAL;
99 }
100
101 *dirp = dir;
102 *datp = dat;
103 *mskp = msk;
104
105 return 0;
106}
107#endif
108
109#ifdef CONFIG_8260
110static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
111{
112 iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
113 void *dir, *dat, *ppar;
114 int adv;
115 u8 msk;
116
117 switch (port) {
118 case fsiop_porta:
119 dir = &io->iop_pdira;
120 dat = &io->iop_pdata;
121 ppar = &io->iop_ppara;
122 break;
123
124 case fsiop_portb:
125 dir = &io->iop_pdirb;
126 dat = &io->iop_pdatb;
127 ppar = &io->iop_pparb;
128 break;
129
130 case fsiop_portc:
131 dir = &io->iop_pdirc;
132 dat = &io->iop_pdatc;
133 ppar = &io->iop_pparc;
134 break;
135
136 case fsiop_portd:
137 dir = &io->iop_pdird;
138 dat = &io->iop_pdatd;
139 ppar = &io->iop_ppard;
140 break;
141
142 default:
143 printk(KERN_ERR DRV_MODULE_NAME
144		       ": Illegal port value %d!\n", port);
145 return -EINVAL;
146 }
147
148 adv = bit >> 3;
149 dir = (char *)dir + adv;
150 dat = (char *)dat + adv;
151 ppar = (char *)ppar + adv;
152
153 msk = 1 << (7 - (bit & 7));
154 if ((in_8(ppar) & msk) != 0) {
155 printk(KERN_ERR DRV_MODULE_NAME
156		       ": pin %d on port %d is not general purpose!\n", bit, port);
157 return -EINVAL;
158 }
159
160 *dirp = dir;
161 *datp = dat;
162 *mskp = msk;
163
164 return 0;
165}
166#endif
167
168static inline void bb_set(u8 *p, u8 m)
169{
170 out_8(p, in_8(p) | m);
171}
172
173static inline void bb_clr(u8 *p, u8 m)
174{
175 out_8(p, in_8(p) & ~m);
176}
177
178static inline int bb_read(u8 *p, u8 m)
179{
180 return (in_8(p) & m) != 0;
181}
182
183static inline void mdio_active(struct fs_enet_mii_bus *bus)
184{
185 bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
186}
187
188static inline void mdio_tristate(struct fs_enet_mii_bus *bus)
189{
190 bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
191}
192
193static inline int mdio_read(struct fs_enet_mii_bus *bus)
194{
195 return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
196}
197
198static inline void mdio(struct fs_enet_mii_bus *bus, int what)
199{
200 if (what)
201 bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
202 else
203 bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
204}
205
206static inline void mdc(struct fs_enet_mii_bus *bus, int what)
207{
208 if (what)
209 bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
210 else
211 bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
212}
213
214static inline void mii_delay(struct fs_enet_mii_bus *bus)
215{
216 udelay(bus->bus_info->i.bitbang.delay);
217}
218
219/* Utility to send the preamble, address, and register (common to read and write). */
220static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
221{
222 int j;
223
224	/*
225	 * Send a 32-bit preamble (all '1's) with an extra '1' bit for good
226	 * measure.  IEEE 802.3 allows PHYs to treat the preamble as optional,
227	 * but the AMD 79C874 requires one after power up and after any MII
228	 * communications error.  We therefore send more preamble than strictly
229	 * needed, which is the safer and more robust choice.
230	 */
231
232 mdio_active(bus);
233 mdio(bus, 1);
234 for (j = 0; j < 32; j++) {
235 mdc(bus, 0);
236 mii_delay(bus);
237 mdc(bus, 1);
238 mii_delay(bus);
239 }
240
241	/* send the start bits (01) and the opcode: read (10) or write (01) */
242 mdc(bus, 0);
243 mdio(bus, 0);
244 mii_delay(bus);
245 mdc(bus, 1);
246 mii_delay(bus);
247 mdc(bus, 0);
248 mdio(bus, 1);
249 mii_delay(bus);
250 mdc(bus, 1);
251 mii_delay(bus);
252 mdc(bus, 0);
253 mdio(bus, read);
254 mii_delay(bus);
255 mdc(bus, 1);
256 mii_delay(bus);
257 mdc(bus, 0);
258 mdio(bus, !read);
259 mii_delay(bus);
260 mdc(bus, 1);
261 mii_delay(bus);
262
263 /* send the PHY address */
264 for (j = 0; j < 5; j++) {
265 mdc(bus, 0);
266 mdio(bus, (addr & 0x10) != 0);
267 mii_delay(bus);
268 mdc(bus, 1);
269 mii_delay(bus);
270 addr <<= 1;
271 }
272
273 /* send the register address */
274 for (j = 0; j < 5; j++) {
275 mdc(bus, 0);
276 mdio(bus, (reg & 0x10) != 0);
277 mii_delay(bus);
278 mdc(bus, 1);
279 mii_delay(bus);
280 reg <<= 1;
281 }
282}
283
284static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
285{
286 u16 rdreg;
287 int ret, j;
288 u8 addr = phy_id & 0xff;
289 u8 reg = location & 0xff;
290
291 bitbang_pre(bus, 1, addr, reg);
292
293 /* tri-state our MDIO I/O pin so we can read */
294 mdc(bus, 0);
295 mdio_tristate(bus);
296 mii_delay(bus);
297 mdc(bus, 1);
298 mii_delay(bus);
299
300 /* check the turnaround bit: the PHY should be driving it to zero */
301 if (mdio_read(bus) != 0) {
302 /* PHY didn't drive TA low */
303 for (j = 0; j < 32; j++) {
304 mdc(bus, 0);
305 mii_delay(bus);
306 mdc(bus, 1);
307 mii_delay(bus);
308 }
309 ret = -1;
310 goto out;
311 }
312
313 mdc(bus, 0);
314 mii_delay(bus);
315
316 /* read 16 bits of register data, MSB first */
317 rdreg = 0;
318 for (j = 0; j < 16; j++) {
319 mdc(bus, 1);
320 mii_delay(bus);
321 rdreg <<= 1;
322 rdreg |= mdio_read(bus);
323 mdc(bus, 0);
324 mii_delay(bus);
325 }
326
327 mdc(bus, 1);
328 mii_delay(bus);
329 mdc(bus, 0);
330 mii_delay(bus);
331 mdc(bus, 1);
332 mii_delay(bus);
333
334 ret = rdreg;
335out:
336 return ret;
337}
338
339static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
340{
341 int j;
342 u8 addr = phy_id & 0xff;
343 u8 reg = location & 0xff;
344 u16 value = val & 0xffff;
345
346 bitbang_pre(bus, 0, addr, reg);
347
348 /* send the turnaround (10) */
349 mdc(bus, 0);
350 mdio(bus, 1);
351 mii_delay(bus);
352 mdc(bus, 1);
353 mii_delay(bus);
354 mdc(bus, 0);
355 mdio(bus, 0);
356 mii_delay(bus);
357 mdc(bus, 1);
358 mii_delay(bus);
359
360 /* write 16 bits of register data, MSB first */
361 for (j = 0; j < 16; j++) {
362 mdc(bus, 0);
363 mdio(bus, (value & 0x8000) != 0);
364 mii_delay(bus);
365 mdc(bus, 1);
366 mii_delay(bus);
367 value <<= 1;
368 }
369
370 /*
371 * Tri-state the MDIO line.
372 */
373 mdio_tristate(bus);
374 mdc(bus, 0);
375 mii_delay(bus);
376 mdc(bus, 1);
377 mii_delay(bus);
378}
379
380int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus)
381{
382 const struct fs_mii_bus_info *bi = bus->bus_info;
383 int r;
384
385 r = bitbang_prep_bit(&bus->bitbang.mdio_dir,
386 &bus->bitbang.mdio_dat,
387 &bus->bitbang.mdio_msk,
388 bi->i.bitbang.mdio_port,
389 bi->i.bitbang.mdio_bit);
390 if (r != 0)
391 return r;
392
393 r = bitbang_prep_bit(&bus->bitbang.mdc_dir,
394 &bus->bitbang.mdc_dat,
395 &bus->bitbang.mdc_msk,
396 bi->i.bitbang.mdc_port,
397 bi->i.bitbang.mdc_bit);
398 if (r != 0)
399 return r;
400
401 bus->mii_read = mii_read;
402 bus->mii_write = mii_write;
403
404 return 0;
405}
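The waveform bitbang_pre() produces is standard IEEE 802.3 clause-22 management framing: 32 preamble '1' bits, start bits 01, a 2-bit opcode (10 for read, 01 for write), then the 5-bit PHY and register addresses, MSB first, followed by turnaround and 16 data bits. As a sanity check against a scope trace, here is a stand-alone sketch that packs the same 14 header bits numerically (illustrative only, not part of the driver):

/* Build the 14 header bits (ST, OP, PHYAD, REGAD) of a clause-22 MDIO
 * frame -- the same sequence bitbang_pre() shifts out one MDC cycle
 * at a time.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t mdio_header(int read, uint8_t phy, uint8_t reg)
{
	uint16_t h = 0;

	h = (h << 2) | 0x1;			/* ST: 01 */
	h = (h << 2) | (read ? 0x2 : 0x1);	/* OP: read 10, write 01 */
	h = (h << 5) | (phy & 0x1f);		/* PHY address, MSB first */
	h = (h << 5) | (reg & 0x1f);		/* register address, MSB first */
	return h;				/* 14 significant bits */
}

int main(void)
{
	uint16_t h = mdio_header(1, 0x03, 0x01);	/* read PHY 3, reg 1 (BMSR) */
	for (int i = 13; i >= 0; i--)
		putchar('0' + ((h >> i) & 1));
	putchar('\n');					/* prints 01100001100001 */
	return 0;
}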
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
new file mode 100644
index 000000000000..b3e192d612e5
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fixed.c
@@ -0,0 +1,92 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37
38#include <asm/pgtable.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42#include "fs_enet.h"
43
44static const u16 mii_regs[7] = {
45	0x3100,		/* BMCR: 100Mb/s, autoneg enabled, full duplex */
46	0x786d,		/* BMSR: link up, autoneg complete, 10/100 capable */
47	0x0fff,		/* PHYSID1 */
48	0x0fff,		/* PHYSID2 */
49	0x01e1,		/* ADVERTISE: 10/100 half/full */
50	0x45e1,		/* LPA: ignored; mii_read() returns bus->fixed.lpa */
51	0x0003,		/* EXPANSION */
52};
53
54static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
55{
56 int ret = 0;
57
58 if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
59 return -1;
60
61 if (location != 5)
62 ret = mii_regs[location];
63 else
64 ret = bus->fixed.lpa;
65
66 return ret;
67}
68
69static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
70{
71 /* do nothing */
72}
73
74int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
75{
76 const struct fs_mii_bus_info *bi = bus->bus_info;
77
78 bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
79
80 /* if speed is fixed at 10Mb, remove 100Mb modes */
81 if (bi->i.fixed.speed == 10)
82 bus->fixed.lpa &= ~LPA_100;
83
84 /* if duplex is half, remove full duplex modes */
85 if (bi->i.fixed.duplex == 0)
86 bus->fixed.lpa &= ~LPA_DUPLEX;
87
88 bus->mii_read = mii_read;
89 bus->mii_write = mii_write;
90
91 return 0;
92}
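A MAC driver consumes this emulated register file exactly as it would a real PHY: read register 5 (LPA) and derive speed and duplex from the ability bits. A stand-alone sketch of that decode follows, with the LPA_* masks copied from <linux/mii.h> so it compiles anywhere:

/* Decode the emulated link-partner ability word the fixed-MII stub
 * returns for register 5, the way a MAC driver would pick speed and
 * duplex.
 */
#include <stdio.h>

#define LPA_10HALF	0x0020
#define LPA_10FULL	0x0040
#define LPA_100HALF	0x0080
#define LPA_100FULL	0x0100

int main(void)
{
	unsigned int lpa = 0x45e1;	/* the driver's default above */

	int speed  = (lpa & (LPA_100HALF | LPA_100FULL)) ? 100 : 10;
	int duplex = (lpa & (LPA_100FULL | LPA_10FULL)) ? 1 : 0;

	printf("fixed link: %dMb/s, %s duplex\n",
	       speed, duplex ? "full" : "half");
	return 0;
}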
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 85d6dc005be0..3e9accf137e7 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -390,10 +390,8 @@ static void ax_changedmtu(struct mkiss *ax)
 		       "MTU change cancelled.\n",
 		       ax->dev->name);
 		dev->mtu = ax->mtu;
-		if (xbuff != NULL)
-			kfree(xbuff);
-		if (rbuff != NULL)
-			kfree(rbuff);
+		kfree(xbuff);
+		kfree(rbuff);
 		return;
 	}
 
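This cleanup is valid because kfree(NULL) is defined to be a no-op, mirroring the ISO C guarantee for free(NULL); the removed NULL guards were dead code. The userspace equivalent:

#include <stdlib.h>

int main(void)
{
	char *p = NULL;
	free(p);	/* freeing NULL is legal and does nothing */
	return 0;
}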
diff --git a/drivers/net/ibm_emac/Makefile b/drivers/net/ibm_emac/Makefile
index 7f583a333c24..f98ddf0e807a 100644
--- a/drivers/net/ibm_emac/Makefile
+++ b/drivers/net/ibm_emac/Makefile
@@ -1,12 +1,11 @@
 #
-# Makefile for the IBM PPC4xx EMAC controllers
+# Makefile for the PowerPC 4xx on-chip ethernet driver
 #
 
 obj-$(CONFIG_IBM_EMAC) += ibm_emac.o
 
 ibm_emac-objs := ibm_emac_mal.o ibm_emac_core.o ibm_emac_phy.o
-
-# Only need this if you want to see additional debug messages
-ifeq ($(CONFIG_IBM_EMAC_ERRMSG), y)
-ibm_emac-objs += ibm_emac_debug.o
-endif
+ibm_emac-$(CONFIG_IBM_EMAC_ZMII)	+= ibm_emac_zmii.o
+ibm_emac-$(CONFIG_IBM_EMAC_RGMII)	+= ibm_emac_rgmii.o
+ibm_emac-$(CONFIG_IBM_EMAC_TAH)		+= ibm_emac_tah.o
+ibm_emac-$(CONFIG_IBM_EMAC_DEBUG)	+= ibm_emac_debug.o
diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h
index 15d5a0e82862..28c476f28c20 100644
--- a/drivers/net/ibm_emac/ibm_emac.h
+++ b/drivers/net/ibm_emac/ibm_emac.h
@@ -1,110 +1,142 @@
1/* 1/*
2 * ibm_emac.h 2 * drivers/net/ibm_emac/ibm_emac.h
3 * 3 *
4 * Register definitions for PowerPC 4xx on-chip ethernet contoller
4 * 5 *
5 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies.
6 * June, 2002 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
7 * 8 *
8 * Copyright 2002 MontaVista Softare Inc. 9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * Armin Kuster <akuster@mvista.com>
12 * Copyright 2002-2004 MontaVista Software Inc.
9 * 13 *
10 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 16 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 17 * option) any later version.
18 *
14 */ 19 */
20#ifndef __IBM_EMAC_H_
21#define __IBM_EMAC_H_
22
23#include <linux/config.h>
24#include <linux/types.h>
25
26/* This is a simple check to prevent use of this driver on non-tested SoCs */
27#if !defined(CONFIG_405GP) && !defined(CONFIG_405GPR) && !defined(CONFIG_405EP) && \
28 !defined(CONFIG_440GP) && !defined(CONFIG_440GX) && !defined(CONFIG_440SP) && \
29 !defined(CONFIG_440EP) && !defined(CONFIG_NP405H)
30#error "Unknown SoC. Please, check chip user manual and make sure EMAC defines are OK"
31#endif
32
33/* EMAC registers Write Access rules */
34struct emac_regs {
35 u32 mr0; /* special */
36 u32 mr1; /* Reset */
37 u32 tmr0; /* special */
38 u32 tmr1; /* special */
39 u32 rmr; /* Reset */
40 u32 isr; /* Always */
41 u32 iser; /* Reset */
42 u32 iahr; /* Reset, R, T */
43 u32 ialr; /* Reset, R, T */
44 u32 vtpid; /* Reset, R, T */
45 u32 vtci; /* Reset, R, T */
46 u32 ptr; /* Reset, T */
47 u32 iaht1; /* Reset, R */
48 u32 iaht2; /* Reset, R */
49 u32 iaht3; /* Reset, R */
50 u32 iaht4; /* Reset, R */
51 u32 gaht1; /* Reset, R */
52 u32 gaht2; /* Reset, R */
53 u32 gaht3; /* Reset, R */
54 u32 gaht4; /* Reset, R */
55 u32 lsah;
56 u32 lsal;
57 u32 ipgvr; /* Reset, T */
58 u32 stacr; /* special */
59 u32 trtr; /* special */
60 u32 rwmr; /* Reset */
61 u32 octx;
62 u32 ocrx;
63 u32 ipcr;
64};
65
66#if !defined(CONFIG_IBM_EMAC4)
67#define EMAC_ETHTOOL_REGS_VER 0
68#define EMAC_ETHTOOL_REGS_SIZE (sizeof(struct emac_regs) - sizeof(u32))
69#else
70#define EMAC_ETHTOOL_REGS_VER 1
71#define EMAC_ETHTOOL_REGS_SIZE sizeof(struct emac_regs)
72#endif
15 73
16#ifndef _IBM_EMAC_H_ 74/* EMACx_MR0 */
17#define _IBM_EMAC_H_ 75#define EMAC_MR0_RXI 0x80000000
18/* General defines needed for the driver */ 76#define EMAC_MR0_TXI 0x40000000
77#define EMAC_MR0_SRST 0x20000000
78#define EMAC_MR0_TXE 0x10000000
79#define EMAC_MR0_RXE 0x08000000
80#define EMAC_MR0_WKE 0x04000000
19 81
20/* Emac */ 82/* EMACx_MR1 */
21typedef struct emac_regs { 83#define EMAC_MR1_FDE 0x80000000
22 u32 em0mr0; 84#define EMAC_MR1_ILE 0x40000000
23 u32 em0mr1; 85#define EMAC_MR1_VLE 0x20000000
24 u32 em0tmr0; 86#define EMAC_MR1_EIFC 0x10000000
25 u32 em0tmr1; 87#define EMAC_MR1_APP 0x08000000
26 u32 em0rmr; 88#define EMAC_MR1_IST 0x01000000
27 u32 em0isr;
28 u32 em0iser;
29 u32 em0iahr;
30 u32 em0ialr;
31 u32 em0vtpid;
32 u32 em0vtci;
33 u32 em0ptr;
34 u32 em0iaht1;
35 u32 em0iaht2;
36 u32 em0iaht3;
37 u32 em0iaht4;
38 u32 em0gaht1;
39 u32 em0gaht2;
40 u32 em0gaht3;
41 u32 em0gaht4;
42 u32 em0lsah;
43 u32 em0lsal;
44 u32 em0ipgvr;
45 u32 em0stacr;
46 u32 em0trtr;
47 u32 em0rwmr;
48} emac_t;
49 89
50/* MODE REG 0 */ 90#define EMAC_MR1_MF_MASK 0x00c00000
51#define EMAC_M0_RXI 0x80000000 91#define EMAC_MR1_MF_10 0x00000000
52#define EMAC_M0_TXI 0x40000000 92#define EMAC_MR1_MF_100 0x00400000
53#define EMAC_M0_SRST 0x20000000 93#if !defined(CONFIG_IBM_EMAC4)
54#define EMAC_M0_TXE 0x10000000 94#define EMAC_MR1_MF_1000 0x00000000
55#define EMAC_M0_RXE 0x08000000 95#define EMAC_MR1_MF_1000GPCS 0x00000000
56#define EMAC_M0_WKE 0x04000000 96#define EMAC_MR1_MF_IPPA(id) 0x00000000
97#else
98#define EMAC_MR1_MF_1000 0x00800000
99#define EMAC_MR1_MF_1000GPCS 0x00c00000
100#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6)
101#endif
57 102
58/* MODE Reg 1 */ 103#define EMAC_TX_FIFO_SIZE 2048
59#define EMAC_M1_FDE 0x80000000
60#define EMAC_M1_ILE 0x40000000
61#define EMAC_M1_VLE 0x20000000
62#define EMAC_M1_EIFC 0x10000000
63#define EMAC_M1_APP 0x08000000
64#define EMAC_M1_AEMI 0x02000000
65#define EMAC_M1_IST 0x01000000
66#define EMAC_M1_MF_1000GPCS 0x00c00000 /* Internal GPCS */
67#define EMAC_M1_MF_1000MBPS 0x00800000 /* External GPCS */
68#define EMAC_M1_MF_100MBPS 0x00400000
69#define EMAC_M1_RFS_16K 0x00280000 /* 000 for 512 byte */
70#define EMAC_M1_TR 0x00008000
71#ifdef CONFIG_IBM_EMAC4
72#define EMAC_M1_RFS_8K 0x00200000
73#define EMAC_M1_RFS_4K 0x00180000
74#define EMAC_M1_RFS_2K 0x00100000
75#define EMAC_M1_RFS_1K 0x00080000
76#define EMAC_M1_TX_FIFO_16K 0x00050000 /* 0's for 512 byte */
77#define EMAC_M1_TX_FIFO_8K 0x00040000
78#define EMAC_M1_TX_FIFO_4K 0x00030000
79#define EMAC_M1_TX_FIFO_2K 0x00020000
80#define EMAC_M1_TX_FIFO_1K 0x00010000
81#define EMAC_M1_TX_TR 0x00008000
82#define EMAC_M1_TX_MWSW 0x00001000 /* 0 wait for status */
83#define EMAC_M1_JUMBO_ENABLE 0x00000800 /* Upt to 9Kr status */
84#define EMAC_M1_OPB_CLK_66 0x00000008 /* 66Mhz */
85#define EMAC_M1_OPB_CLK_83 0x00000010 /* 83Mhz */
86#define EMAC_M1_OPB_CLK_100 0x00000018 /* 100Mhz */
87#define EMAC_M1_OPB_CLK_100P 0x00000020 /* 100Mhz+ */
88#else /* CONFIG_IBM_EMAC4 */
89#define EMAC_M1_RFS_4K 0x00300000 /* ~4k for 512 byte */
90#define EMAC_M1_RFS_2K 0x00200000
91#define EMAC_M1_RFS_1K 0x00100000
92#define EMAC_M1_TX_FIFO_2K 0x00080000 /* 0's for 512 byte */
93#define EMAC_M1_TX_FIFO_1K 0x00040000
94#define EMAC_M1_TR0_DEPEND 0x00010000 /* 0'x for single packet */
95#define EMAC_M1_TR1_DEPEND 0x00004000
96#define EMAC_M1_TR1_MULTI 0x00002000
97#define EMAC_M1_JUMBO_ENABLE 0x00001000
98#endif /* CONFIG_IBM_EMAC4 */
99#define EMAC_M1_BASE (EMAC_M1_TX_FIFO_2K | \
100 EMAC_M1_APP | \
101 EMAC_M1_TR | EMAC_M1_VLE)
102 104
103/* Transmit Mode Register 0 */ 105#if !defined(CONFIG_IBM_EMAC4)
104#define EMAC_TMR0_GNP0 0x80000000 106#define EMAC_MR1_RFS_4K 0x00300000
105#define EMAC_TMR0_GNP1 0x40000000 107#define EMAC_MR1_RFS_16K 0x00000000
106#define EMAC_TMR0_GNPD 0x20000000 108#define EMAC_RX_FIFO_SIZE(gige) 4096
107#define EMAC_TMR0_FC 0x10000000 109#define EMAC_MR1_TFS_2K 0x00080000
110#define EMAC_MR1_TR0_MULT 0x00008000
111#define EMAC_MR1_JPSM 0x00000000
112#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
113#else
114#define EMAC_MR1_RFS_4K 0x00180000
115#define EMAC_MR1_RFS_16K 0x00280000
116#define EMAC_RX_FIFO_SIZE(gige) ((gige) ? 16384 : 4096)
117#define EMAC_MR1_TFS_2K 0x00020000
118#define EMAC_MR1_TR 0x00008000
119#define EMAC_MR1_MWSW_001 0x00001000
120#define EMAC_MR1_JPSM 0x00000800
121#define EMAC_MR1_OBCI_MASK 0x00000038
122#define EMAC_MR1_OBCI_50 0x00000000
123#define EMAC_MR1_OBCI_66 0x00000008
124#define EMAC_MR1_OBCI_83 0x00000010
125#define EMAC_MR1_OBCI_100 0x00000018
126#define EMAC_MR1_OBCI_100P 0x00000020
127#define EMAC_MR1_OBCI(freq) ((freq) <= 50 ? EMAC_MR1_OBCI_50 : \
128 (freq) <= 66 ? EMAC_MR1_OBCI_66 : \
129 (freq) <= 83 ? EMAC_MR1_OBCI_83 : \
130 (freq) <= 100 ? EMAC_MR1_OBCI_100 : EMAC_MR1_OBCI_100P)
131#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR | \
132 EMAC_MR1_MWSW_001 | EMAC_MR1_OBCI(opb))
133#endif
134
135/* EMACx_TMR0 */
136#define EMAC_TMR0_GNP 0x80000000
137#if !defined(CONFIG_IBM_EMAC4)
138#define EMAC_TMR0_DEFAULT 0x00000000
139#else
108#define EMAC_TMR0_TFAE_2_32 0x00000001 140#define EMAC_TMR0_TFAE_2_32 0x00000001
109#define EMAC_TMR0_TFAE_4_64 0x00000002 141#define EMAC_TMR0_TFAE_4_64 0x00000002
110#define EMAC_TMR0_TFAE_8_128 0x00000003 142#define EMAC_TMR0_TFAE_8_128 0x00000003
@@ -112,14 +144,36 @@ typedef struct emac_regs {
112#define EMAC_TMR0_TFAE_32_512 0x00000005 144#define EMAC_TMR0_TFAE_32_512 0x00000005
113#define EMAC_TMR0_TFAE_64_1024 0x00000006 145#define EMAC_TMR0_TFAE_64_1024 0x00000006
114#define EMAC_TMR0_TFAE_128_2048 0x00000007 146#define EMAC_TMR0_TFAE_128_2048 0x00000007
147#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_2_32
148#endif
149#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT)
150
151/* EMACx_TMR1 */
152
153/* IBM manuals are not very clear here.
154 * This is my interpretation of how things are. --ebs
155 */
156#if defined(CONFIG_40x)
157#define EMAC_FIFO_ENTRY_SIZE 8
158#define EMAC_MAL_BURST_SIZE (16 * 4)
159#else
160#define EMAC_FIFO_ENTRY_SIZE 16
161#define EMAC_MAL_BURST_SIZE (64 * 4)
162#endif
163
164#if !defined(CONFIG_IBM_EMAC4)
165#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16))
166#else
167#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14))
168#endif
115 169
116/* Receive Mode Register */ 170/* EMACx_RMR */
117#define EMAC_RMR_SP 0x80000000 171#define EMAC_RMR_SP 0x80000000
118#define EMAC_RMR_SFCS 0x40000000 172#define EMAC_RMR_SFCS 0x40000000
119#define EMAC_RMR_ARRP 0x20000000 173#define EMAC_RMR_RRP 0x20000000
120#define EMAC_RMR_ARP 0x10000000 174#define EMAC_RMR_RFP 0x10000000
121#define EMAC_RMR_AROP 0x08000000 175#define EMAC_RMR_ROP 0x08000000
122#define EMAC_RMR_ARPI 0x04000000 176#define EMAC_RMR_RPIR 0x04000000
123#define EMAC_RMR_PPP 0x02000000 177#define EMAC_RMR_PPP 0x02000000
124#define EMAC_RMR_PME 0x01000000 178#define EMAC_RMR_PME 0x01000000
125#define EMAC_RMR_PMME 0x00800000 179#define EMAC_RMR_PMME 0x00800000
@@ -127,6 +181,9 @@ typedef struct emac_regs {
127#define EMAC_RMR_MIAE 0x00200000 181#define EMAC_RMR_MIAE 0x00200000
128#define EMAC_RMR_BAE 0x00100000 182#define EMAC_RMR_BAE 0x00100000
129#define EMAC_RMR_MAE 0x00080000 183#define EMAC_RMR_MAE 0x00080000
184#if !defined(CONFIG_IBM_EMAC4)
185#define EMAC_RMR_BASE 0x00000000
186#else
130#define EMAC_RMR_RFAF_2_32 0x00000001 187#define EMAC_RMR_RFAF_2_32 0x00000001
131#define EMAC_RMR_RFAF_4_64 0x00000002 188#define EMAC_RMR_RFAF_4_64 0x00000002
132#define EMAC_RMR_RFAF_8_128 0x00000003 189#define EMAC_RMR_RFAF_8_128 0x00000003
@@ -134,9 +191,21 @@ typedef struct emac_regs {
134#define EMAC_RMR_RFAF_32_512 0x00000005 191#define EMAC_RMR_RFAF_32_512 0x00000005
135#define EMAC_RMR_RFAF_64_1024 0x00000006 192#define EMAC_RMR_RFAF_64_1024 0x00000006
136#define EMAC_RMR_RFAF_128_2048 0x00000007 193#define EMAC_RMR_RFAF_128_2048 0x00000007
137#define EMAC_RMR_BASE (EMAC_RMR_IAE | EMAC_RMR_BAE) 194#define EMAC_RMR_BASE EMAC_RMR_RFAF_128_2048
195#endif
138 196
139/* Interrupt Status & enable Regs */ 197/* EMACx_ISR & EMACx_ISER */
198#if !defined(CONFIG_IBM_EMAC4)
199#define EMAC_ISR_TXPE 0x00000000
200#define EMAC_ISR_RXPE 0x00000000
201#define EMAC_ISR_TXUE 0x00000000
202#define EMAC_ISR_RXOE 0x00000000
203#else
204#define EMAC_ISR_TXPE 0x20000000
205#define EMAC_ISR_RXPE 0x10000000
206#define EMAC_ISR_TXUE 0x08000000
207#define EMAC_ISR_RXOE 0x04000000
208#endif
140#define EMAC_ISR_OVR 0x02000000 209#define EMAC_ISR_OVR 0x02000000
141#define EMAC_ISR_PP 0x01000000 210#define EMAC_ISR_PP 0x01000000
142#define EMAC_ISR_BP 0x00800000 211#define EMAC_ISR_BP 0x00800000
@@ -147,53 +216,62 @@ typedef struct emac_regs {
147#define EMAC_ISR_PTLE 0x00040000 216#define EMAC_ISR_PTLE 0x00040000
148#define EMAC_ISR_ORE 0x00020000 217#define EMAC_ISR_ORE 0x00020000
149#define EMAC_ISR_IRE 0x00010000 218#define EMAC_ISR_IRE 0x00010000
150#define EMAC_ISR_DBDM 0x00000200 219#define EMAC_ISR_SQE 0x00000080
151#define EMAC_ISR_DB0 0x00000100 220#define EMAC_ISR_TE 0x00000040
152#define EMAC_ISR_SE0 0x00000080
153#define EMAC_ISR_TE0 0x00000040
154#define EMAC_ISR_DB1 0x00000020
155#define EMAC_ISR_SE1 0x00000010
156#define EMAC_ISR_TE1 0x00000008
157#define EMAC_ISR_MOS 0x00000002 221#define EMAC_ISR_MOS 0x00000002
158#define EMAC_ISR_MOF 0x00000001 222#define EMAC_ISR_MOF 0x00000001
159 223
160/* STA CONTROL REG */ 224/* EMACx_STACR */
225#define EMAC_STACR_PHYD_MASK 0xffff
226#define EMAC_STACR_PHYD_SHIFT 16
161#define EMAC_STACR_OC 0x00008000 227#define EMAC_STACR_OC 0x00008000
162#define EMAC_STACR_PHYE 0x00004000 228#define EMAC_STACR_PHYE 0x00004000
163#define EMAC_STACR_WRITE 0x00002000 229#define EMAC_STACR_STAC_MASK 0x00003000
164#define EMAC_STACR_READ 0x00001000 230#define EMAC_STACR_STAC_READ 0x00001000
165#define EMAC_STACR_CLK_83MHZ 0x00000800 /* 0's for 50Mhz */ 231#define EMAC_STACR_STAC_WRITE 0x00002000
166#define EMAC_STACR_CLK_66MHZ 0x00000400 232#if !defined(CONFIG_IBM_EMAC4)
167#define EMAC_STACR_CLK_100MHZ 0x00000C00 233#define EMAC_STACR_OPBC_MASK 0x00000C00
234#define EMAC_STACR_OPBC_50 0x00000000
235#define EMAC_STACR_OPBC_66 0x00000400
236#define EMAC_STACR_OPBC_83 0x00000800
237#define EMAC_STACR_OPBC_100 0x00000C00
238#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \
239 (freq) <= 66 ? EMAC_STACR_OPBC_66 : \
240 (freq) <= 83 ? EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100)
241#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb)
242#else
243#define EMAC_STACR_BASE(opb) 0x00000000
244#endif
245#define EMAC_STACR_PCDA_MASK 0x1f
246#define EMAC_STACR_PCDA_SHIFT 5
247#define EMAC_STACR_PRA_MASK 0x1f
248
249/* EMACx_TRTR */
250#if !defined(CONFIG_IBM_EMAC4)
251#define EMAC_TRTR_SHIFT 27
252#else
253#define EMAC_TRTR_SHIFT 24
254#endif
255#define EMAC_TRTR(size) ((((size) >> 6) - 1) << EMAC_TRTR_SHIFT)
168 256
169/* Transmit Request Threshold Register */ 257/* EMACx_RWMR */
170#define EMAC_TRTR_1600 0x18000000 /* 0's for 64 Bytes */ 258#if !defined(CONFIG_IBM_EMAC4)
171#define EMAC_TRTR_1024 0x0f000000 259#define EMAC_RWMR(l,h) (((l) << 23) | ( ((h) & 0x1ff) << 7))
172#define EMAC_TRTR_512 0x07000000 260#else
173#define EMAC_TRTR_256 0x03000000 261#define EMAC_RWMR(l,h) (((l) << 22) | ( ((h) & 0x3ff) << 6))
174#define EMAC_TRTR_192 0x10000000 262#endif
175#define EMAC_TRTR_128 0x01000000
176 263
264/* EMAC specific TX descriptor control fields (write access) */
177#define EMAC_TX_CTRL_GFCS 0x0200 265#define EMAC_TX_CTRL_GFCS 0x0200
178#define EMAC_TX_CTRL_GP 0x0100 266#define EMAC_TX_CTRL_GP 0x0100
179#define EMAC_TX_CTRL_ISA 0x0080 267#define EMAC_TX_CTRL_ISA 0x0080
180#define EMAC_TX_CTRL_RSA 0x0040 268#define EMAC_TX_CTRL_RSA 0x0040
181#define EMAC_TX_CTRL_IVT 0x0020 269#define EMAC_TX_CTRL_IVT 0x0020
182#define EMAC_TX_CTRL_RVT 0x0010 270#define EMAC_TX_CTRL_RVT 0x0010
183#define EMAC_TX_CTRL_TAH_CSUM 0x000e /* TAH only */ 271#define EMAC_TX_CTRL_TAH_CSUM 0x000e
184#define EMAC_TX_CTRL_TAH_SEG4 0x000a /* TAH only */
185#define EMAC_TX_CTRL_TAH_SEG3 0x0008 /* TAH only */
186#define EMAC_TX_CTRL_TAH_SEG2 0x0006 /* TAH only */
187#define EMAC_TX_CTRL_TAH_SEG1 0x0004 /* TAH only */
188#define EMAC_TX_CTRL_TAH_SEG0 0x0002 /* TAH only */
189#define EMAC_TX_CTRL_TAH_DIS 0x0000 /* TAH only */
190 272
191#define EMAC_TX_CTRL_DFLT ( \ 273/* EMAC specific TX descriptor status fields (read access) */
192 MAL_TX_CTRL_INTR | EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP )
193
194/* madmal transmit status / Control bits */
195#define EMAC_TX_ST_BFCS 0x0200 274#define EMAC_TX_ST_BFCS 0x0200
196#define EMAC_TX_ST_BPP 0x0100
197#define EMAC_TX_ST_LCS 0x0080 275#define EMAC_TX_ST_LCS 0x0080
198#define EMAC_TX_ST_ED 0x0040 276#define EMAC_TX_ST_ED 0x0040
199#define EMAC_TX_ST_EC 0x0020 277#define EMAC_TX_ST_EC 0x0020
@@ -202,8 +280,16 @@ typedef struct emac_regs {
202#define EMAC_TX_ST_SC 0x0004 280#define EMAC_TX_ST_SC 0x0004
203#define EMAC_TX_ST_UR 0x0002 281#define EMAC_TX_ST_UR 0x0002
204#define EMAC_TX_ST_SQE 0x0001 282#define EMAC_TX_ST_SQE 0x0001
283#if !defined(CONFIG_IBM_EMAC_TAH)
284#define EMAC_IS_BAD_TX(v) ((v) & (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
285 EMAC_TX_ST_EC | EMAC_TX_ST_LC | \
286 EMAC_TX_ST_MC | EMAC_TX_ST_UR))
287#else
288#define EMAC_IS_BAD_TX(v) ((v) & (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
289 EMAC_TX_ST_EC | EMAC_TX_ST_LC))
290#endif
205 291
206/* madmal receive status / Control bits */ 292/* EMAC specific RX descriptor status fields (read access) */
207#define EMAC_RX_ST_OE 0x0200 293#define EMAC_RX_ST_OE 0x0200
208#define EMAC_RX_ST_PP 0x0100 294#define EMAC_RX_ST_PP 0x0100
209#define EMAC_RX_ST_BP 0x0080 295#define EMAC_RX_ST_BP 0x0080
@@ -214,54 +300,10 @@ typedef struct emac_regs {
214#define EMAC_RX_ST_PTL 0x0004 300#define EMAC_RX_ST_PTL 0x0004
215#define EMAC_RX_ST_ORE 0x0002 301#define EMAC_RX_ST_ORE 0x0002
216#define EMAC_RX_ST_IRE 0x0001 302#define EMAC_RX_ST_IRE 0x0001
217#define EMAC_BAD_RX_PACKET 0x02ff 303#define EMAC_RX_TAH_BAD_CSUM 0x0003
218#define EMAC_CSUM_VER_ERROR 0x0003 304#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \
219 305 EMAC_RX_ST_RP | EMAC_RX_ST_SE | \
220/* identify a bad rx packet dependent on emac features */ 306 EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \
221#ifdef CONFIG_IBM_EMAC4 307 EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \
222#define EMAC_IS_BAD_RX_PACKET(desc) \ 308 EMAC_RX_ST_IRE )
223 (((desc & (EMAC_BAD_RX_PACKET & ~EMAC_CSUM_VER_ERROR)) || \ 309#endif /* __IBM_EMAC_H_ */
224 ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_ORE) || \
225 ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_IRE)))
226#else
227#define EMAC_IS_BAD_RX_PACKET(desc) \
228 (desc & EMAC_BAD_RX_PACKET)
229#endif
230
231/* SoC implementation specific EMAC register defaults */
232#if defined(CONFIG_440GP)
233#define EMAC_RWMR_DEFAULT 0x80009000
234#define EMAC_TMR0_DEFAULT 0x00000000
235#define EMAC_TMR1_DEFAULT 0xf8640000
236#elif defined(CONFIG_440GX)
237#define EMAC_RWMR_DEFAULT 0x1000a200
238#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_2_32
239#define EMAC_TMR1_DEFAULT 0xa00f0000
240#elif defined(CONFIG_440SP)
241#define EMAC_RWMR_DEFAULT 0x08002000
242#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_128_2048
243#define EMAC_TMR1_DEFAULT 0xf8200000
244#else
245#define EMAC_RWMR_DEFAULT 0x0f002000
246#define EMAC_TMR0_DEFAULT 0x00000000
247#define EMAC_TMR1_DEFAULT 0x380f0000
248#endif /* CONFIG_440GP */
249
250/* Revision specific EMAC register defaults */
251#ifdef CONFIG_IBM_EMAC4
252#define EMAC_M1_DEFAULT (EMAC_M1_BASE | \
253 EMAC_M1_OPB_CLK_83 | \
254 EMAC_M1_TX_MWSW)
255#define EMAC_RMR_DEFAULT (EMAC_RMR_BASE | \
256 EMAC_RMR_RFAF_128_2048)
257#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP0 | \
258 EMAC_TMR0_DEFAULT)
259#define EMAC_TRTR_DEFAULT EMAC_TRTR_1024
260#else /* !CONFIG_IBM_EMAC4 */
261#define EMAC_M1_DEFAULT EMAC_M1_BASE
262#define EMAC_RMR_DEFAULT EMAC_RMR_BASE
263#define EMAC_TMR0_XMIT EMAC_TMR0_GNP0
264#define EMAC_TRTR_DEFAULT EMAC_TRTR_1600
265#endif /* CONFIG_IBM_EMAC4 */
266
267#endif
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 14e9b6315f20..943fbd1546ff 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1,13 +1,14 @@
1/* 1/*
2 * ibm_emac_core.c 2 * drivers/net/ibm_emac/ibm_emac_core.c
3 * 3 *
4 * Ethernet driver for the built in ethernet on the IBM 4xx PowerPC 4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 * processors.
6 *
7 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
8 * 5 *
9 * Based on original work by 6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
10 * 8 *
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
11 * Armin Kuster <akuster@mvista.com> 12 * Armin Kuster <akuster@mvista.com>
12 * Johnnie Peters <jpeters@mvista.com> 13 * Johnnie Peters <jpeters@mvista.com>
13 * 14 *
@@ -15,29 +16,24 @@
15 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version. 18 * option) any later version.
18 * TODO 19 *
19 * - Check for races in the "remove" code path
20 * - Add some Power Management to the MAC and the PHY
21 * - Audit remaining of non-rewritten code (--BenH)
22 * - Cleanup message display using msglevel mecanism
23 * - Address all errata
24 * - Audit all register update paths to ensure they
25 * are being written post soft reset if required.
26 */ 20 */
21
22#include <linux/config.h>
27#include <linux/module.h> 23#include <linux/module.h>
28#include <linux/kernel.h> 24#include <linux/kernel.h>
29#include <linux/sched.h> 25#include <linux/sched.h>
30#include <linux/string.h> 26#include <linux/string.h>
31#include <linux/timer.h>
32#include <linux/ptrace.h>
33#include <linux/errno.h> 27#include <linux/errno.h>
34#include <linux/ioport.h>
35#include <linux/slab.h>
36#include <linux/interrupt.h> 28#include <linux/interrupt.h>
37#include <linux/delay.h> 29#include <linux/delay.h>
38#include <linux/init.h> 30#include <linux/init.h>
39#include <linux/types.h> 31#include <linux/types.h>
40#include <linux/dma-mapping.h> 32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/crc32.h>
41#include <linux/ethtool.h> 37#include <linux/ethtool.h>
42#include <linux/mii.h> 38#include <linux/mii.h>
43#include <linux/bitops.h> 39#include <linux/bitops.h>
@@ -45,1691 +41,1893 @@
45#include <asm/processor.h> 41#include <asm/processor.h>
46#include <asm/io.h> 42#include <asm/io.h>
47#include <asm/dma.h> 43#include <asm/dma.h>
48#include <asm/irq.h>
49#include <asm/uaccess.h> 44#include <asm/uaccess.h>
50#include <asm/ocp.h> 45#include <asm/ocp.h>
51 46
52#include <linux/netdevice.h>
53#include <linux/etherdevice.h>
54#include <linux/skbuff.h>
55#include <linux/crc32.h>
56
57#include "ibm_emac_core.h" 47#include "ibm_emac_core.h"
58 48#include "ibm_emac_debug.h"
59//#define MDIO_DEBUG(fmt) printk fmt
60#define MDIO_DEBUG(fmt)
61
62//#define LINK_DEBUG(fmt) printk fmt
63#define LINK_DEBUG(fmt)
64
65//#define PKT_DEBUG(fmt) printk fmt
66#define PKT_DEBUG(fmt)
67
68#define DRV_NAME "emac"
69#define DRV_VERSION "2.0"
70#define DRV_AUTHOR "Benjamin Herrenschmidt <benh@kernel.crashing.org>"
71#define DRV_DESC "IBM EMAC Ethernet driver"
72 49
73/* 50/*
74 * When mdio_idx >= 0, contains a list of emac ocp_devs 51 * Lack of dma_unmap_???? calls is intentional.
75 * that have had their initialization deferred until the 52 *
76 * common MDIO controller has been initialized. 53 * API-correct usage requires additional support state information to be
54 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
55 * EMAC design (e.g. TX buffer passed from network stack can be split into
56 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
57 * maintaining such information will add additional overhead.
58 * Current DMA API implementation for 4xx processors only ensures cache coherency
59 * and dma_unmap_???? routines are empty and are likely to stay this way.
60 * I decided to omit dma_unmap_??? calls because I don't want to add additional
61 * complexity just for the sake of following some abstract API, when it doesn't
62 * add any real benefit to the driver. I understand that this decision maybe
63 * controversial, but I really tried to make code API-correct and efficient
64 * at the same time and didn't come up with code I liked :(. --ebs
77 */ 65 */
78LIST_HEAD(emac_init_list);
79 66
80MODULE_AUTHOR(DRV_AUTHOR); 67#define DRV_NAME "emac"
68#define DRV_VERSION "3.53"
69#define DRV_DESC "PPC 4xx OCP EMAC driver"
70
81MODULE_DESCRIPTION(DRV_DESC); 71MODULE_DESCRIPTION(DRV_DESC);
72MODULE_AUTHOR
73 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
82MODULE_LICENSE("GPL"); 74MODULE_LICENSE("GPL");
83 75
84static int skb_res = SKB_RES; 76/* minimum number of free TX descriptors required to wake up TX process */
85module_param(skb_res, int, 0444); 77#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
86MODULE_PARM_DESC(skb_res, "Amount of data to reserve on skb buffs\n"
87 "The 405 handles a misaligned IP header fine but\n"
88 "this can help if you are routing to a tunnel or a\n"
89 "device that needs aligned data. 0..2");
90
91#define RGMII_PRIV(ocpdev) ((struct ibm_ocp_rgmii*)ocp_get_drvdata(ocpdev))
92 78
93static unsigned int rgmii_enable[] = { 79/* If packet size is less than this number, we allocate small skb and copy packet
94 RGMII_RTBI, 80 * contents into it instead of just sending original big skb up
95 RGMII_RGMII, 81 */
96 RGMII_TBI, 82#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
97 RGMII_GMII
98};
99 83
100static unsigned int rgmii_speed_mask[] = { 84/* Since multiple EMACs share MDIO lines in various ways, we need
101 RGMII_MII2_SPDMASK, 85 * to avoid re-using the same PHY ID in cases where the arch didn't
102 RGMII_MII3_SPDMASK 86 * setup precise phy_map entries
103}; 87 */
88static u32 busy_phy_map;
104 89
105static unsigned int rgmii_speed100[] = { 90#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && (defined(CONFIG_405EP) || defined(CONFIG_440EP))
106 RGMII_MII2_100MB, 91/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
107 RGMII_MII3_100MB 92 * with PHY RX clock problem.
108}; 93 * 440EP has more sane SDR0_MFR register implementation than 440GX, which
94 * also allows controlling each EMAC clock
95 */
96static inline void EMAC_RX_CLK_TX(int idx)
97{
98 unsigned long flags;
99 local_irq_save(flags);
109 100
110static unsigned int rgmii_speed1000[] = { 101#if defined(CONFIG_405EP)
111 RGMII_MII2_1000MB, 102 mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
112 RGMII_MII3_1000MB 103#else /* CONFIG_440EP */
113}; 104 SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
105#endif
114 106
115#define ZMII_PRIV(ocpdev) ((struct ibm_ocp_zmii*)ocp_get_drvdata(ocpdev)) 107 local_irq_restore(flags);
108}
116 109
117static unsigned int zmii_enable[][4] = { 110static inline void EMAC_RX_CLK_DEFAULT(int idx)
118 {ZMII_SMII0, ZMII_RMII0, ZMII_MII0, 111{
119 ~(ZMII_MDI1 | ZMII_MDI2 | ZMII_MDI3)}, 112 unsigned long flags;
120 {ZMII_SMII1, ZMII_RMII1, ZMII_MII1, 113 local_irq_save(flags);
121 ~(ZMII_MDI0 | ZMII_MDI2 | ZMII_MDI3)},
122 {ZMII_SMII2, ZMII_RMII2, ZMII_MII2,
123 ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI3)},
124 {ZMII_SMII3, ZMII_RMII3, ZMII_MII3, ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI2)}
125};
126 114
127static unsigned int mdi_enable[] = { 115#if defined(CONFIG_405EP)
128 ZMII_MDI0, 116 mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
129 ZMII_MDI1, 117#else /* CONFIG_440EP */
130 ZMII_MDI2, 118 SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
131 ZMII_MDI3 119#endif
132};
133 120
134static unsigned int zmii_speed = 0x0; 121 local_irq_restore(flags);
135static unsigned int zmii_speed100[] = { 122}
136 ZMII_MII0_100MB, 123#else
137 ZMII_MII1_100MB, 124#define EMAC_RX_CLK_TX(idx) ((void)0)
138 ZMII_MII2_100MB, 125#define EMAC_RX_CLK_DEFAULT(idx) ((void)0)
139 ZMII_MII3_100MB 126#endif
140};
141 127
142/* Since multiple EMACs share MDIO lines in various ways, we need 128#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
143 * to avoid re-using the same PHY ID in cases where the arch didn't 129/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
144 * setup precise phy_map entries 130 * unfortunately this is less flexible than 440EP case, because it's a global
131 * setting for all EMACs, therefore we do this clock trick only during probe.
145 */ 132 */
146static u32 busy_phy_map = 0; 133#define EMAC_CLK_INTERNAL SDR_WRITE(DCRN_SDR_MFR, \
134 SDR_READ(DCRN_SDR_MFR) | 0x08000000)
135#define EMAC_CLK_EXTERNAL SDR_WRITE(DCRN_SDR_MFR, \
136 SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
137#else
138#define EMAC_CLK_INTERNAL ((void)0)
139#define EMAC_CLK_EXTERNAL ((void)0)
140#endif
147 141
148/* If EMACs share a common MDIO device, this points to it */ 142/* I don't want to litter system log with timeout errors
149static struct net_device *mdio_ndev = NULL; 143 * when we have brain-damaged PHY.
144 */
145static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
146 const char *error)
147{
148#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
149 DBG("%d: %s" NL, dev->def->index, error);
150#else
151 if (net_ratelimit())
152 printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
153#endif
154}
150 155
151struct emac_def_dev { 156/* PHY polling intervals */
152 struct list_head link; 157#define PHY_POLL_LINK_ON HZ
153 struct ocp_device *ocpdev; 158#define PHY_POLL_LINK_OFF (HZ / 5)
154 struct ibm_ocp_mal *mal; 159
160/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
161static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
162 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
163 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
164 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
165 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
166 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
167 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
168 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
169 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
170 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
171 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
172 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
173 "tx_bd_excessive_collisions", "tx_bd_late_collision",
174 "tx_bd_multple_collisions", "tx_bd_single_collision",
175 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
176 "tx_errors"
155}; 177};
156 178
157static struct net_device_stats *emac_stats(struct net_device *dev) 179static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
180static void emac_clean_tx_ring(struct ocp_enet_private *dev);
181
182static inline int emac_phy_supports_gige(int phy_mode)
158{ 183{
159 struct ocp_enet_private *fep = dev->priv; 184 return phy_mode == PHY_MODE_GMII ||
160 return &fep->stats; 185 phy_mode == PHY_MODE_RGMII ||
161}; 186 phy_mode == PHY_MODE_TBI ||
187 phy_mode == PHY_MODE_RTBI;
188}
162 189
163static int 190static inline int emac_phy_gpcs(int phy_mode)
164emac_init_rgmii(struct ocp_device *rgmii_dev, int input, int phy_mode)
165{ 191{
166 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(rgmii_dev); 192 return phy_mode == PHY_MODE_TBI ||
167 const char *mode_name[] = { "RTBI", "RGMII", "TBI", "GMII" }; 193 phy_mode == PHY_MODE_RTBI;
168 int mode = -1; 194}
169 195
170 if (!rgmii) { 196static inline void emac_tx_enable(struct ocp_enet_private *dev)
171 rgmii = kmalloc(sizeof(struct ibm_ocp_rgmii), GFP_KERNEL); 197{
198 struct emac_regs *p = dev->emacp;
199 unsigned long flags;
200 u32 r;
172 201
173 if (rgmii == NULL) { 202 local_irq_save(flags);
174 printk(KERN_ERR
175 "rgmii%d: Out of memory allocating RGMII structure!\n",
176 rgmii_dev->def->index);
177 return -ENOMEM;
178 }
179 203
180 memset(rgmii, 0, sizeof(*rgmii)); 204 DBG("%d: tx_enable" NL, dev->def->index);
181 205
182 rgmii->base = 206 r = in_be32(&p->mr0);
183 (struct rgmii_regs *)ioremap(rgmii_dev->def->paddr, 207 if (!(r & EMAC_MR0_TXE))
184 sizeof(*rgmii->base)); 208 out_be32(&p->mr0, r | EMAC_MR0_TXE);
185 if (rgmii->base == NULL) { 209 local_irq_restore(flags);
186 printk(KERN_ERR 210}
187 "rgmii%d: Cannot ioremap bridge registers!\n",
188 rgmii_dev->def->index);
189 211
190 kfree(rgmii); 212static void emac_tx_disable(struct ocp_enet_private *dev)
191 return -ENOMEM; 213{
192 } 214 struct emac_regs *p = dev->emacp;
193 ocp_set_drvdata(rgmii_dev, rgmii); 215 unsigned long flags;
194 } 216 u32 r;
195 217
196 if (phy_mode) { 218 local_irq_save(flags);
197 switch (phy_mode) { 219
198 case PHY_MODE_GMII: 220 DBG("%d: tx_disable" NL, dev->def->index);
199 mode = GMII;
200 break;
201 case PHY_MODE_TBI:
202 mode = TBI;
203 break;
204 case PHY_MODE_RTBI:
205 mode = RTBI;
206 break;
207 case PHY_MODE_RGMII:
208 default:
209 mode = RGMII;
210 }
211 rgmii->base->fer &= ~RGMII_FER_MASK(input);
212 rgmii->base->fer |= rgmii_enable[mode] << (4 * input);
213 } else {
214 switch ((rgmii->base->fer & RGMII_FER_MASK(input)) >> (4 *
215 input)) {
216 case RGMII_RTBI:
217 mode = RTBI;
218 break;
219 case RGMII_RGMII:
220 mode = RGMII;
221 break;
222 case RGMII_TBI:
223 mode = TBI;
224 break;
225 case RGMII_GMII:
226 mode = GMII;
227 }
228 }
229 221
230 /* Set mode to RGMII if nothing valid is detected */ 222 r = in_be32(&p->mr0);
231 if (mode < 0) 223 if (r & EMAC_MR0_TXE) {
232 mode = RGMII; 224 int n = 300;
225 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
226 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
227 --n;
228 if (unlikely(!n))
229 emac_report_timeout_error(dev, "TX disable timeout");
230 }
231 local_irq_restore(flags);
232}
233 233
234 printk(KERN_NOTICE "rgmii%d: input %d in %s mode\n", 234static void emac_rx_enable(struct ocp_enet_private *dev)
235 rgmii_dev->def->index, input, mode_name[mode]); 235{
236 struct emac_regs *p = dev->emacp;
237 unsigned long flags;
238 u32 r;
236 239
237 rgmii->mode[input] = mode; 240 local_irq_save(flags);
238 rgmii->users++; 241 if (unlikely(dev->commac.rx_stopped))
242 goto out;
239 243
240 return 0; 244 DBG("%d: rx_enable" NL, dev->def->index);
245
246 r = in_be32(&p->mr0);
247 if (!(r & EMAC_MR0_RXE)) {
248 if (unlikely(!(r & EMAC_MR0_RXI))) {
249 /* Wait if previous async disable is still in progress */
250 int n = 100;
251 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
252 --n;
253 if (unlikely(!n))
254 emac_report_timeout_error(dev,
255 "RX disable timeout");
256 }
257 out_be32(&p->mr0, r | EMAC_MR0_RXE);
258 }
259 out:
260 local_irq_restore(flags);
241} 261}
242 262
-static void
-emac_rgmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
-{
-	struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev);
-	unsigned int rgmii_speed;
-
-	rgmii_speed = in_be32(&rgmii->base->ssr);
-
-	rgmii_speed &= ~rgmii_speed_mask[input];
-
-	if (speed == 1000)
-		rgmii_speed |= rgmii_speed1000[input];
-	else if (speed == 100)
-		rgmii_speed |= rgmii_speed100[input];
-
-	out_be32(&rgmii->base->ssr, rgmii_speed);
-}
-
-static void emac_close_rgmii(struct ocp_device *ocpdev)
-{
-	struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev);
-	BUG_ON(!rgmii || rgmii->users == 0);
-
-	if (!--rgmii->users) {
-		ocp_set_drvdata(ocpdev, NULL);
-		iounmap((void *)rgmii->base);
-		kfree(rgmii);
-	}
-}
-
+static void emac_rx_disable(struct ocp_enet_private *dev)
+{
+	struct emac_regs *p = dev->emacp;
+	unsigned long flags;
+	u32 r;
+
+	local_irq_save(flags);
+
+	DBG("%d: rx_disable" NL, dev->def->index);
+
+	r = in_be32(&p->mr0);
+	if (r & EMAC_MR0_RXE) {
+		int n = 300;
+		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
+		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
+			--n;
+		if (unlikely(!n))
+			emac_report_timeout_error(dev, "RX disable timeout");
+	}
+	local_irq_restore(flags);
+}
+
+static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
+{
+	struct emac_regs *p = dev->emacp;
+	unsigned long flags;
+	u32 r;
+
+	local_irq_save(flags);
+
+	DBG("%d: rx_disable_async" NL, dev->def->index);
+
+	r = in_be32(&p->mr0);
+	if (r & EMAC_MR0_RXE)
+		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
+	local_irq_restore(flags);
+}
+
-static int emac_init_zmii(struct ocp_device *zmii_dev, int input, int phy_mode)
-{
-	struct ibm_ocp_zmii *zmii = ZMII_PRIV(zmii_dev);
-	const char *mode_name[] = { "SMII", "RMII", "MII" };
-	int mode = -1;
-
-	if (!zmii) {
-		zmii = kmalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
-		if (zmii == NULL) {
-			printk(KERN_ERR
-			       "zmii%d: Out of memory allocating ZMII structure!\n",
-			       zmii_dev->def->index);
-			return -ENOMEM;
-		}
-		memset(zmii, 0, sizeof(*zmii));
-
-		zmii->base =
-		    (struct zmii_regs *)ioremap(zmii_dev->def->paddr,
-						sizeof(*zmii->base));
-		if (zmii->base == NULL) {
-			printk(KERN_ERR
-			       "zmii%d: Cannot ioremap bridge registers!\n",
-			       zmii_dev->def->index);
-
-			kfree(zmii);
-			return -ENOMEM;
-		}
-		ocp_set_drvdata(zmii_dev, zmii);
-	}
-
-	if (phy_mode) {
-		switch (phy_mode) {
-		case PHY_MODE_MII:
-			mode = MII;
-			break;
-		case PHY_MODE_RMII:
-			mode = RMII;
-			break;
-		case PHY_MODE_SMII:
-		default:
-			mode = SMII;
-		}
-		zmii->base->fer &= ~ZMII_FER_MASK(input);
-		zmii->base->fer |= zmii_enable[input][mode];
-	} else {
-		switch ((zmii->base->fer & ZMII_FER_MASK(input)) << (4 * input)) {
-		case ZMII_MII0:
-			mode = MII;
-			break;
-		case ZMII_RMII0:
-			mode = RMII;
-			break;
-		case ZMII_SMII0:
-			mode = SMII;
-		}
-	}
-
-	/* Set mode to SMII if nothing valid is detected */
-	if (mode < 0)
-		mode = SMII;
-
-	printk(KERN_NOTICE "zmii%d: input %d in %s mode\n",
-	       zmii_dev->def->index, input, mode_name[mode]);
-
-	zmii->mode[input] = mode;
-	zmii->users++;
-
-	return 0;
-}
-
+static int emac_reset(struct ocp_enet_private *dev)
+{
+	struct emac_regs *p = dev->emacp;
+	unsigned long flags;
+	int n = 20;
+
+	DBG("%d: reset" NL, dev->def->index);
+
+	local_irq_save(flags);
+
+	if (!dev->reset_failed) {
+		/* 40x erratum suggests stopping RX channel before reset,
+		 * we stop TX as well
+		 */
+		emac_rx_disable(dev);
+		emac_tx_disable(dev);
+	}
+
+	out_be32(&p->mr0, EMAC_MR0_SRST);
+	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
+		--n;
+	local_irq_restore(flags);
+
+	if (n) {
+		dev->reset_failed = 0;
+		return 0;
+	} else {
+		emac_report_timeout_error(dev, "reset timeout");
+		dev->reset_failed = 1;
+		return -ETIMEDOUT;
+	}
+}
+
+static void emac_hash_mc(struct ocp_enet_private *dev)
+{
+	struct emac_regs *p = dev->emacp;
+	u16 gaht[4] = { 0 };
+	struct dev_mc_list *dmi;
+
+	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);
+
+	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
+		int bit;
+		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
+		     dev->def->index,
+		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
+		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
+
+		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
+		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
+	}
+	out_be32(&p->gaht1, gaht[0]);
+	out_be32(&p->gaht2, gaht[1]);
+	out_be32(&p->gaht3, gaht[2]);
+	out_be32(&p->gaht4, gaht[3]);
+}
+
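The GAHT computation in emac_hash_mc() above packs the top 6 bits of the Ethernet CRC into one of 64 hash-table bits. The same bit selection can be checked in a standalone user-space sketch; the CRC routine below mirrors the classic kernel ether_crc() helper (MSB-first register, polynomial 0x04c11db7, all-ones init, no final inversion), and the multicast address is an arbitrary example, not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the classic kernel ether_crc(): bits of each byte are
     * consumed LSB-first into an MSB-first CRC-32 register. */
    static uint32_t ether_crc(int length, const uint8_t *data)
    {
            uint32_t crc = 0xffffffff;

            while (--length >= 0) {
                    uint8_t octet = *data++;
                    int bit;
                    for (bit = 0; bit < 8; bit++, octet >>= 1)
                            crc = (crc << 1) ^
                                (((crc >> 31) ^ (octet & 1)) ? 0x04c11db7 : 0);
            }
            return crc;
    }

    int main(void)
    {
            /* Hypothetical multicast address, for illustration only */
            const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

            /* Same mapping as emac_hash_mc(): top 6 CRC bits select one of
             * 64 hash bits, bit 0 being the MSB of GAHT1. */
            int bit = 63 - (ether_crc(6, mc) >> 26);

            printf("bit=%d -> GAHT%d |= 0x%04x\n",
                   bit, (bit >> 4) + 1, 0x8000 >> (bit & 0x0f));
            return 0;
    }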
-static void emac_enable_zmii_port(struct ocp_device *ocpdev, int input)
-{
-	u32 mask;
-	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
-
-	mask = in_be32(&zmii->base->fer);
-	mask &= zmii_enable[input][MDI];	/* turn all non enabled MDI's off */
-	mask |= zmii_enable[input][zmii->mode[input]] | mdi_enable[input];
-	out_be32(&zmii->base->fer, mask);
-}
-
-static void
-emac_zmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
-{
-	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
-
-	if (speed == 100)
-		zmii_speed |= zmii_speed100[input];
-	else
-		zmii_speed &= ~zmii_speed100[input];
-
-	out_be32(&zmii->base->ssr, zmii_speed);
-}
-
+static inline u32 emac_iff2rmr(struct net_device *ndev)
+{
+	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
+	    EMAC_RMR_BASE;
+
+	if (ndev->flags & IFF_PROMISC)
+		r |= EMAC_RMR_PME;
+	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
+		r |= EMAC_RMR_PMME;
+	else if (ndev->mc_count > 0)
+		r |= EMAC_RMR_MAE;
+
+	return r;
+}
+
+static inline int emac_opb_mhz(void)
+{
+	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
+}
+
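emac_opb_mhz() rounds the OPB bus clock to the nearest MHz by adding half a MHz before the truncating integer division. A quick user-space check of that arithmetic (the sample frequencies are arbitrary):

    #include <stdio.h>

    /* Same rounding as emac_opb_mhz(): add half of the divisor before
     * truncating integer division, i.e. round to nearest MHz. */
    static int to_mhz(unsigned long hz)
    {
            return (hz + 500000) / 1000000;
    }

    int main(void)
    {
            printf("%d\n", to_mhz(66666666));   /* 66.67 MHz -> 67 */
            printf("%d\n", to_mhz(83333333));   /* 83.33 MHz -> 83 */
            printf("%d\n", to_mhz(50000000));   /* 50.00 MHz -> 50 */
            return 0;
    }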
-static void emac_close_zmii(struct ocp_device *ocpdev)
-{
-	struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
-	BUG_ON(!zmii || zmii->users == 0);
-
-	if (!--zmii->users) {
-		ocp_set_drvdata(ocpdev, NULL);
-		iounmap((void *)zmii->base);
-		kfree(zmii);
-	}
-}
-
-int emac_phy_read(struct net_device *dev, int mii_id, int reg)
-{
-	int count;
-	uint32_t stacr;
-	struct ocp_enet_private *fep = dev->priv;
-	emac_t *emacp = fep->emacp;
-
-	MDIO_DEBUG(("%s: phy_read, id: 0x%x, reg: 0x%x\n", dev->name, mii_id,
-		    reg));
-
-	/* Enable proper ZMII port */
-	if (fep->zmii_dev)
-		emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);
-
-	/* Use the EMAC that has the MDIO port */
-	if (fep->mdio_dev) {
-		dev = fep->mdio_dev;
-		fep = dev->priv;
-		emacp = fep->emacp;
-	}
-
-	count = 0;
-	while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
-	       && (count++ < MDIO_DELAY))
-		udelay(1);
-	MDIO_DEBUG((" (count was %d)\n", count));
-
-	if ((stacr & EMAC_STACR_OC) == 0) {
-		printk(KERN_WARNING "%s: PHY read timeout #1!\n", dev->name);
-		return -1;
-	}
-
-	/* Clear the speed bits and make a read request to the PHY */
-	stacr = ((EMAC_STACR_READ | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ);
-	stacr |= ((mii_id & 0x1f) << 5);
-
-	out_be32(&emacp->em0stacr, stacr);
-
-	count = 0;
-	while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
-	       && (count++ < MDIO_DELAY))
-		udelay(1);
-	MDIO_DEBUG((" (count was %d)\n", count));
-
-	if ((stacr & EMAC_STACR_OC) == 0) {
-		printk(KERN_WARNING "%s: PHY read timeout #2!\n", dev->name);
-		return -1;
-	}
-
-	/* Check for a read error */
-	if (stacr & EMAC_STACR_PHYE) {
-		MDIO_DEBUG(("EMAC MDIO PHY error!\n"));
-		return -1;
-	}
-
-	MDIO_DEBUG((" -> 0x%x\n", stacr >> 16));
-
-	return (stacr >> 16);
-}
-
+/* BHs disabled */
+static int emac_configure(struct ocp_enet_private *dev)
+{
+	struct emac_regs *p = dev->emacp;
+	struct net_device *ndev = dev->ndev;
+	int gige;
+	u32 r;
+
+	DBG("%d: configure" NL, dev->def->index);
+
+	if (emac_reset(dev) < 0)
+		return -ETIMEDOUT;
+
+	tah_reset(dev->tah_dev);
+
+	/* Mode register */
+	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
+	if (dev->phy.duplex == DUPLEX_FULL)
+		r |= EMAC_MR1_FDE;
+	switch (dev->phy.speed) {
+	case SPEED_1000:
+		if (emac_phy_gpcs(dev->phy.mode)) {
+			r |= EMAC_MR1_MF_1000GPCS |
+			    EMAC_MR1_MF_IPPA(dev->phy.address);
+
+			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
+			 * identify this GPCS PHY later.
+			 */
+			out_be32(&p->ipcr, 0xdeadbeef);
+		} else
+			r |= EMAC_MR1_MF_1000;
+		r |= EMAC_MR1_RFS_16K;
+		gige = 1;
+
+		if (dev->ndev->mtu > ETH_DATA_LEN)
+			r |= EMAC_MR1_JPSM;
+		break;
+	case SPEED_100:
+		r |= EMAC_MR1_MF_100;
+		/* Fall through */
+	default:
+		r |= EMAC_MR1_RFS_4K;
+		gige = 0;
+		break;
+	}
+
+	if (dev->rgmii_dev)
+		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
+				dev->phy.speed);
+	else
+		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);
+
+#if !defined(CONFIG_40x)
+	/* on 40x erratum forces us to NOT use integrated flow control,
+	 * let's hope it works on 44x ;)
+	 */
+	if (dev->phy.duplex == DUPLEX_FULL) {
+		if (dev->phy.pause)
+			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
+		else if (dev->phy.asym_pause)
+			r |= EMAC_MR1_APP;
+	}
+#endif
+	out_be32(&p->mr1, r);
+
+	/* Set individual MAC address */
+	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+		 ndev->dev_addr[5]);
+
+	/* VLAN Tag Protocol ID */
+	out_be32(&p->vtpid, 0x8100);
+
+	/* Receive mode register */
+	r = emac_iff2rmr(ndev);
+	if (r & EMAC_RMR_MAE)
+		emac_hash_mc(dev);
+	out_be32(&p->rmr, r);
+
+	/* FIFOs thresholds */
+	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
+		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
+	out_be32(&p->tmr1, r);
+	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));
+
+	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
+	   there should be still enough space in FIFO to allow our link
+	   partner time to process this frame and also time to send PAUSE
+	   frame itself.
+
+	   Here is the worst case scenario for the RX FIFO "headroom"
+	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
+
+	   1) One maximum-length frame on TX                    1522 bytes
+	   2) One PAUSE frame time                                64 bytes
+	   3) PAUSE frame decode time allowance                   64 bytes
+	   4) One maximum-length frame on RX                    1522 bytes
+	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
+	   ----------
+	   3187 bytes
+
+	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
+	   low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
+	 */
+	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
+		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
+	out_be32(&p->rwmr, r);
+
+	/* Set PAUSE timer to the maximum */
+	out_be32(&p->ptr, 0xffff);
+
+	/* IRQ sources */
+	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
+		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
+		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
+		 EMAC_ISR_IRE | EMAC_ISR_TE);
+
+	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
+	if (emac_phy_gpcs(dev->phy.mode))
+		mii_reset_phy(&dev->phy);
+
+	return 0;
+}
+
+/* BHs disabled */
+static void emac_reinitialize(struct ocp_enet_private *dev)
+{
+	DBG("%d: reinitialize" NL, dev->def->index);
+
+	if (!emac_configure(dev)) {
+		emac_tx_enable(dev);
+		emac_rx_enable(dev);
+	}
+}
+
+/* BHs disabled */
+static void emac_full_tx_reset(struct net_device *ndev)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	struct ocp_func_emac_data *emacdata = dev->def->additions;
+
+	DBG("%d: full_tx_reset" NL, dev->def->index);
+
+	emac_tx_disable(dev);
+	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
+	emac_clean_tx_ring(dev);
+	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
+
+	emac_configure(dev);
+
+	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
+	emac_tx_enable(dev);
+	emac_rx_enable(dev);
+
+	netif_wake_queue(ndev);
+}
+
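The FIFO watermark comment in emac_configure() sums five worst-case contributions; the arithmetic checks out at 3187 bytes, which puts the high-water mark at a quarter of the RX FIFO. A sketch of the same computation, with the 4KB FIFO size taken as an assumed example value (non-gige mode):

    #include <stdio.h>

    int main(void)
    {
            /* Worst-case RX FIFO headroom from the emac_configure() comment */
            int tx_frame  = 1522;   /* one maximum-length frame on TX */
            int pause_tx  = 64;     /* one PAUSE frame time */
            int pause_dec = 64;     /* PAUSE frame decode time allowance */
            int rx_frame  = 1522;   /* one maximum-length frame on RX */
            int rtt       = 15;     /* round-trip propagation delay, 100Mb */

            int headroom = tx_frame + pause_tx + pause_dec + rx_frame + rtt;

            /* Assumed example: a 4KB RX FIFO as used in non-gige mode */
            int fifo = 4096;

            printf("worst-case headroom: %d bytes\n", headroom);  /* 3187 */
            printf("high-water mark:     %d bytes\n", fifo / 4);  /* 1024 */
            printf("low-water mark:      %d bytes\n", fifo / 8);  /* 512 */
            return 0;
    }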
-void emac_phy_write(struct net_device *dev, int mii_id, int reg, int data)
-{
-	int count;
-	uint32_t stacr;
-	struct ocp_enet_private *fep = dev->priv;
-	emac_t *emacp = fep->emacp;
-
-	MDIO_DEBUG(("%s phy_write, id: 0x%x, reg: 0x%x, data: 0x%x\n",
-		    dev->name, mii_id, reg, data));
-
-	/* Enable proper ZMII port */
-	if (fep->zmii_dev)
-		emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);
-
-	/* Use the EMAC that has the MDIO port */
-	if (fep->mdio_dev) {
-		dev = fep->mdio_dev;
-		fep = dev->priv;
-		emacp = fep->emacp;
-	}
-
-	count = 0;
-	while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
-	       && (count++ < MDIO_DELAY))
-		udelay(1);
-	MDIO_DEBUG((" (count was %d)\n", count));
-
-	if ((stacr & EMAC_STACR_OC) == 0) {
-		printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name);
-		return;
-	}
-
-	/* Clear the speed bits and make a write request to the PHY */
-	stacr = ((EMAC_STACR_WRITE | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ);
-	stacr |= ((mii_id & 0x1f) << 5) | ((data & 0xffff) << 16);
-
-	out_be32(&emacp->em0stacr, stacr);
-
-	count = 0;
-	while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0)
-	       && (count++ < MDIO_DELAY))
-		udelay(1);
-	MDIO_DEBUG((" (count was %d)\n", count));
-
-	if ((stacr & EMAC_STACR_OC) == 0)
-		printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name);
-
-	/* Check for a write error */
-	if ((stacr & EMAC_STACR_PHYE) != 0) {
-		MDIO_DEBUG(("EMAC MDIO PHY error!\n"));
-	}
-}
-
+static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
+{
+	struct emac_regs *p = dev->emacp;
+	u32 r;
+	int n;
+
+	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);
+
+	/* Enable proper MDIO port */
+	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
+
+	/* Wait for management interface to become idle */
+	n = 10;
+	while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
+		udelay(1);
+		if (!--n)
+			goto to;
+	}
+
+	/* Issue read command */
+	out_be32(&p->stacr,
+		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
+		 (reg & EMAC_STACR_PRA_MASK)
+		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT));
+
+	/* Wait for read to complete */
+	n = 100;
+	while (!((r = in_be32(&p->stacr)) & EMAC_STACR_OC)) {
+		udelay(1);
+		if (!--n)
+			goto to;
+	}
+
+	if (unlikely(r & EMAC_STACR_PHYE)) {
+		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
+		    id, reg);
+		return -EREMOTEIO;
+	}
+
+	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
+	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
+	return r;
+ to:
+	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
+	return -ETIMEDOUT;
+}
+
+static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
+			      u16 val)
+{
+	struct emac_regs *p = dev->emacp;
+	int n;
+
+	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
+	     val);
+
+	/* Enable proper MDIO port */
+	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
+
+	/* Wait for management interface to be idle */
+	n = 10;
+	while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
+		udelay(1);
+		if (!--n)
+			goto to;
+	}
+
+	/* Issue write command */
+	out_be32(&p->stacr,
+		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
+		 (reg & EMAC_STACR_PRA_MASK) |
+		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
+		 (val << EMAC_STACR_PHYD_SHIFT));
+
+	/* Wait for write to complete */
+	n = 100;
+	while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
+		udelay(1);
+		if (!--n)
+			goto to;
+	}
+	return;
+ to:
+	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
+}
+
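Both MDIO helpers follow the same protocol: wait for STACR.OC to signal an idle management interface, issue the command, then poll OC again with a bounded spin before reporting a timeout. A minimal runnable sketch of that bounded-poll idiom; the simulated read_reg() and OC_BIT value are stand-ins for the real in_be32()/register bits, not the driver's actual accessors:

    #include <stdint.h>
    #include <stdio.h>

    #define OC_BIT 0x8000u          /* hypothetical "operation complete" flag */

    /* Simulated device register: reports completion on the third poll.
     * In the driver this would be in_be32(&p->stacr). */
    static int polls;
    static uint32_t read_reg(void)
    {
            return ++polls >= 3 ? OC_BIT : 0;
    }

    /* Bounded busy-wait, the shape of the loops in __emac_mdio_read/write():
     * poll, back off (udelay(1) in the driver, elided here), and give up
     * after max_us iterations. */
    static int wait_op_complete(int max_us)
    {
            while (!(read_reg() & OC_BIT))
                    if (!--max_us)
                            return -1;  /* caller turns this into -ETIMEDOUT */
            return 0;
    }

    int main(void)
    {
            printf("result: %d after %d polls\n", wait_op_complete(10), polls);
            return 0;
    }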
-static void emac_txeob_dev(void *param, u32 chanmask)
-{
-	struct net_device *dev = param;
-	struct ocp_enet_private *fep = dev->priv;
-	unsigned long flags;
-
-	spin_lock_irqsave(&fep->lock, flags);
-
-	PKT_DEBUG(("emac_txeob_dev() entry, tx_cnt: %d\n", fep->tx_cnt));
-
-	while (fep->tx_cnt &&
-	       !(fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_READY)) {
-
-		if (fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_LAST) {
-			/* Tell the system the transmit completed. */
-			dma_unmap_single(&fep->ocpdev->dev,
-					 fep->tx_desc[fep->ack_slot].data_ptr,
-					 fep->tx_desc[fep->ack_slot].data_len,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb_irq(fep->tx_skb[fep->ack_slot]);
-
-			if (fep->tx_desc[fep->ack_slot].ctrl &
-			    (EMAC_TX_ST_EC | EMAC_TX_ST_MC | EMAC_TX_ST_SC))
-				fep->stats.collisions++;
-		}
-
-		fep->tx_skb[fep->ack_slot] = (struct sk_buff *)NULL;
-		if (++fep->ack_slot == NUM_TX_BUFF)
-			fep->ack_slot = 0;
-
-		fep->tx_cnt--;
-	}
-	if (fep->tx_cnt < NUM_TX_BUFF)
-		netif_wake_queue(dev);
-
-	PKT_DEBUG(("emac_txeob_dev() exit, tx_cnt: %d\n", fep->tx_cnt));
-
-	spin_unlock_irqrestore(&fep->lock, flags);
-}
-
-/*
-  Fill/Re-fill the rx chain with valid ctrl/ptrs.
-  This function will fill from rx_slot up to the parm end.
-  So to completely fill the chain pre-set rx_slot to 0 and
-  pass in an end of 0.
- */
+static int emac_mdio_read(struct net_device *ndev, int id, int reg)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	int res;
+
+	local_bh_disable();
+	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
+			       (u8) reg);
+	local_bh_enable();
+	return res;
+}
+
+static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+
+	local_bh_disable();
+	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
+			  (u8) reg, (u16) val);
+	local_bh_enable();
+}
+
+/* BHs disabled */
+static void emac_set_multicast_list(struct net_device *ndev)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	struct emac_regs *p = dev->emacp;
+	u32 rmr = emac_iff2rmr(ndev);
+
+	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
+	BUG_ON(!netif_running(dev->ndev));
+
+	/* I decided to relax register access rules here to avoid
+	 * full EMAC reset.
+	 *
+	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
+	 * in MR1 register and do a full EMAC reset.
+	 * One TX BD status update is delayed and, after EMAC reset, it
+	 * never happens, resulting in a TX hang (it'll be recovered by TX
+	 * timeout handler eventually, but this is just gross).
+	 * So we either have to do full TX reset or try to cheat here :)
+	 *
+	 * The only required change is to RX mode register, so I *think* all
+	 * we need is just to stop RX channel. This seems to work on all
+	 * tested SoCs. --ebs
+	 */
+	emac_rx_disable(dev);
+	if (rmr & EMAC_RMR_MAE)
+		emac_hash_mc(dev);
+	out_be32(&p->rmr, rmr);
+	emac_rx_enable(dev);
+}
+
+/* BHs disabled */
+static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
+{
+	struct ocp_func_emac_data *emacdata = dev->def->additions;
+	int rx_sync_size = emac_rx_sync_size(new_mtu);
+	int rx_skb_size = emac_rx_skb_size(new_mtu);
+	int i, ret = 0;
+
+	emac_rx_disable(dev);
+	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
+
+	if (dev->rx_sg_skb) {
+		++dev->estats.rx_dropped_resize;
+		dev_kfree_skb(dev->rx_sg_skb);
+		dev->rx_sg_skb = NULL;
+	}
+
+	/* Make a first pass over RX ring and mark BDs ready, dropping
+	 * non-processed packets on the way. We need this as a separate pass
+	 * to simplify error recovery in the case of allocation failure later.
+	 */
+	for (i = 0; i < NUM_RX_BUFF; ++i) {
+		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
+			++dev->estats.rx_dropped_resize;
+
+		dev->rx_desc[i].data_len = 0;
+		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
+		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+	}
+
-static void emac_rx_fill(struct net_device *dev, int end)
-{
-	int i;
-	struct ocp_enet_private *fep = dev->priv;
-
-	i = fep->rx_slot;
-	do {
-		/* We don't want the 16 bytes skb_reserve done by dev_alloc_skb,
-		 * it breaks our cache line alignment. However, we still allocate
-		 * +16 so that we end up allocating the exact same size as
-		 * dev_alloc_skb() would do.
-		 * Also, because of the skb_res, the max DMA size we give to EMAC
-		 * is slightly wrong, causing it to potentially DMA 2 more bytes
-		 * from a broken/oversized packet. These 16 bytes will take care
-		 * that we don't walk on somebody else's toes with that.
-		 */
-		fep->rx_skb[i] =
-		    alloc_skb(fep->rx_buffer_size + 16, GFP_ATOMIC);
-
-		if (fep->rx_skb[i] == NULL) {
-			/* Keep rx_slot here, the next time clean/fill is called
-			 * we will try again before the MAL wraps back here.
-			 * If the MAL tries to use this descriptor with
-			 * the EMPTY bit off it will cause the
-			 * rxde interrupt. That is where we will
-			 * try again to allocate an sk_buff.
-			 */
-			break;
-		}
-
-		if (skb_res)
-			skb_reserve(fep->rx_skb[i], skb_res);
-
-		/* We must NOT dma_map_single the cache line right after the
-		 * buffer, so we must crop our sync size to account for the
-		 * reserved space
-		 */
-		fep->rx_desc[i].data_ptr =
-		    (unsigned char *)dma_map_single(&fep->ocpdev->dev,
-						    (void *)fep->rx_skb[i]->data,
-						    fep->rx_buffer_size -
-						    skb_res, DMA_FROM_DEVICE);
-
-		/*
-		 * Some 4xx implementations use the previously
-		 * reserved bits in data_len to encode the MS
-		 * 4-bits of a 36-bit physical address (ERPN).
-		 * This must be initialized.
-		 */
-		fep->rx_desc[i].data_len = 0;
-		fep->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR |
-		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
-
-	} while ((i = (i + 1) % NUM_RX_BUFF) != end);
-
-	fep->rx_slot = i;
-}
-
-static void
-emac_rx_csum(struct net_device *dev, unsigned short ctrl, struct sk_buff *skb)
-{
-	struct ocp_enet_private *fep = dev->priv;
-
-	/* Exit if interface has no TAH engine */
-	if (!fep->tah_dev) {
-		skb->ip_summed = CHECKSUM_NONE;
-		return;
-	}
-
-	/* Check for TCP/UDP/IP csum error */
-	if (ctrl & EMAC_CSUM_VER_ERROR) {
-		/* Let the stack verify checksum errors */
-		skb->ip_summed = CHECKSUM_NONE;
-/*		adapter->hw_csum_err++; */
-	} else {
-		/* Csum is good */
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-/*		adapter->hw_csum_good++; */
-	}
-}
-
-static int emac_rx_clean(struct net_device *dev)
-{
-	int i, b, bnum = 0, buf[6];
-	int error, frame_length;
-	struct ocp_enet_private *fep = dev->priv;
-	unsigned short ctrl;
-
-	i = fep->rx_slot;
-
-	PKT_DEBUG(("emac_rx_clean() entry, rx_slot: %d\n", fep->rx_slot));
-
+	/* Reallocate RX ring only if bigger skb buffers are required */
+	if (rx_skb_size <= dev->rx_skb_size)
+		goto skip;
+
+	/* Second pass, allocate new skbs */
+	for (i = 0; i < NUM_RX_BUFF; ++i) {
+		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+		if (!skb) {
+			ret = -ENOMEM;
+			goto oom;
+		}
+
+		BUG_ON(!dev->rx_skb[i]);
+		dev_kfree_skb(dev->rx_skb[i]);
+
+		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+		dev->rx_desc[i].data_ptr =
+		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
+				   DMA_FROM_DEVICE) + 2;
+		dev->rx_skb[i] = skb;
+	}
+ skip:
+	/* Check if we need to change "Jumbo" bit in MR1 */
+	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
+		/* This is to prevent starting RX channel in emac_rx_enable() */
+		dev->commac.rx_stopped = 1;
+
+		dev->ndev->mtu = new_mtu;
+		emac_full_tx_reset(dev->ndev);
+	}
+
+	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
+ oom:
+	/* Restart RX */
+	dev->commac.rx_stopped = dev->rx_slot = 0;
+	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
+	emac_rx_enable(dev);
+
+	return ret;
+}
+
+/* Process ctx, rtnl_lock semaphore */
+static int emac_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	int ret = 0;
+
+	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
+		return -EINVAL;
+
+	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);
+
+	local_bh_disable();
+	if (netif_running(ndev)) {
+		/* Check if we really need to reinitialize RX ring */
+		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
+			ret = emac_resize_rx_ring(dev, new_mtu);
+	}
+
+	if (!ret) {
+		ndev->mtu = new_mtu;
+		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
+		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
+	}
+	local_bh_enable();
+
+	return ret;
+}
+
-	do {
-		if (fep->rx_skb[i] == NULL)
-			continue;	/* we have already handled the packet but have failed to alloc */
-		/*
-		   since rx_desc is in uncached mem we don't keep reading it directly,
-		   we pull out a local copy of ctrl and do the checks on the copy.
-		 */
-		ctrl = fep->rx_desc[i].ctrl;
-		if (ctrl & MAL_RX_CTRL_EMPTY)
-			break;	/* we don't have any more ready packets */
-
-		if (EMAC_IS_BAD_RX_PACKET(ctrl)) {
-			fep->stats.rx_errors++;
-			fep->stats.rx_dropped++;
-
-			if (ctrl & EMAC_RX_ST_OE)
-				fep->stats.rx_fifo_errors++;
-			if (ctrl & EMAC_RX_ST_AE)
-				fep->stats.rx_frame_errors++;
-			if (ctrl & EMAC_RX_ST_BFCS)
-				fep->stats.rx_crc_errors++;
-			if (ctrl & (EMAC_RX_ST_RP | EMAC_RX_ST_PTL |
-				    EMAC_RX_ST_ORE | EMAC_RX_ST_IRE))
-				fep->stats.rx_length_errors++;
-		} else {
-			if ((ctrl & (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) ==
-			    (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) {
-				/* Single descriptor packet */
-				emac_rx_csum(dev, ctrl, fep->rx_skb[i]);
-				/* Send the skb up the chain. */
-				frame_length = fep->rx_desc[i].data_len - 4;
-				skb_put(fep->rx_skb[i], frame_length);
-				fep->rx_skb[i]->dev = dev;
-				fep->rx_skb[i]->protocol =
-				    eth_type_trans(fep->rx_skb[i], dev);
-				error = netif_rx(fep->rx_skb[i]);
-
-				if ((error == NET_RX_DROP) ||
-				    (error == NET_RX_BAD)) {
-					fep->stats.rx_dropped++;
-				} else {
-					fep->stats.rx_packets++;
-					fep->stats.rx_bytes += frame_length;
-				}
-				fep->rx_skb[i] = NULL;
-			} else {
-				/* Multiple descriptor packet */
-				if (ctrl & MAL_RX_CTRL_FIRST) {
-					if (fep->rx_desc[(i + 1) % NUM_RX_BUFF].
-					    ctrl & MAL_RX_CTRL_EMPTY)
-						break;
-					bnum = 0;
-					buf[bnum] = i;
-					++bnum;
-					continue;
-				}
-				if (((ctrl & MAL_RX_CTRL_FIRST) !=
-				     MAL_RX_CTRL_FIRST) &&
-				    ((ctrl & MAL_RX_CTRL_LAST) !=
-				     MAL_RX_CTRL_LAST)) {
-					if (fep->rx_desc[(i + 1) %
-							 NUM_RX_BUFF].ctrl &
-					    MAL_RX_CTRL_EMPTY) {
-						i = buf[0];
-						break;
-					}
-					buf[bnum] = i;
-					++bnum;
-					continue;
-				}
-				if (ctrl & MAL_RX_CTRL_LAST) {
-					buf[bnum] = i;
-					++bnum;
-					skb_put(fep->rx_skb[buf[0]],
-						fep->rx_desc[buf[0]].data_len);
-					for (b = 1; b < bnum; b++) {
-						/*
-						 * MAL is braindead, we need
-						 * to copy the remainder
-						 * of the packet from the
-						 * latter descriptor buffers
-						 * to the first skb. Then
-						 * dispose of the source
-						 * skbs.
-						 *
-						 * Once the stack is fixed
-						 * to handle frags on most
-						 * protocols we can generate
-						 * a fragmented skb with
-						 * no copies.
-						 */
-						memcpy(fep->rx_skb[buf[0]]->data +
-						       fep->rx_skb[buf[0]]->len,
-						       fep->rx_skb[buf[b]]->data,
-						       fep->rx_desc[buf[b]].data_len);
-						skb_put(fep->rx_skb[buf[0]],
-							fep->rx_desc[buf[b]].data_len);
-						dma_unmap_single(&fep->ocpdev->dev,
-								 fep->rx_desc[buf[b]].data_ptr,
-								 fep->rx_desc[buf[b]].data_len,
-								 DMA_FROM_DEVICE);
-						dev_kfree_skb(fep->rx_skb[buf[b]]);
-					}
-					emac_rx_csum(dev, ctrl,
-						     fep->rx_skb[buf[0]]);
-
-					fep->rx_skb[buf[0]]->dev = dev;
-					fep->rx_skb[buf[0]]->protocol =
-					    eth_type_trans(fep->rx_skb[buf[0]],
-							   dev);
-					error = netif_rx(fep->rx_skb[buf[0]]);
-
-					if ((error == NET_RX_DROP)
-					    || (error == NET_RX_BAD)) {
-						fep->stats.rx_dropped++;
-					} else {
-						fep->stats.rx_packets++;
-						fep->stats.rx_bytes +=
-						    fep->rx_skb[buf[0]]->len;
-					}
-					for (b = 0; b < bnum; b++)
-						fep->rx_skb[buf[b]] = NULL;
-				}
-			}
-		}
-	} while ((i = (i + 1) % NUM_RX_BUFF) != fep->rx_slot);
-
-	PKT_DEBUG(("emac_rx_clean() exit, rx_slot: %d\n", fep->rx_slot));
-
-	return i;
-}
-
-static void emac_rxeob_dev(void *param, u32 chanmask)
-{
-	struct net_device *dev = param;
-	struct ocp_enet_private *fep = dev->priv;
-	unsigned long flags;
-	int n;
-
-	spin_lock_irqsave(&fep->lock, flags);
-	if ((n = emac_rx_clean(dev)) != fep->rx_slot)
-		emac_rx_fill(dev, n);
-	spin_unlock_irqrestore(&fep->lock, flags);
-}
-
-/*
- * This interrupt should never occur, we don't program
- * the MAL for continuous mode.
- */
-static void emac_txde_dev(void *param, u32 chanmask)
-{
-	struct net_device *dev = param;
-	struct ocp_enet_private *fep = dev->priv;
-
-	printk(KERN_WARNING "%s: transmit descriptor error\n", dev->name);
-
-	emac_mac_dump(dev);
-	emac_mal_dump(dev);
-
-	/* Reenable the transmit channel */
-	mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
-}
-
+static void emac_clean_tx_ring(struct ocp_enet_private *dev)
+{
+	int i;
+	for (i = 0; i < NUM_TX_BUFF; ++i) {
+		if (dev->tx_skb[i]) {
+			dev_kfree_skb(dev->tx_skb[i]);
+			dev->tx_skb[i] = NULL;
+			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
+				++dev->estats.tx_dropped;
+		}
+		dev->tx_desc[i].ctrl = 0;
+		dev->tx_desc[i].data_ptr = 0;
+	}
+}
+
+static void emac_clean_rx_ring(struct ocp_enet_private *dev)
+{
+	int i;
+	for (i = 0; i < NUM_RX_BUFF; ++i)
+		if (dev->rx_skb[i]) {
+			dev->rx_desc[i].ctrl = 0;
+			dev_kfree_skb(dev->rx_skb[i]);
+			dev->rx_skb[i] = NULL;
+			dev->rx_desc[i].data_ptr = 0;
+		}
+
+	if (dev->rx_sg_skb) {
+		dev_kfree_skb(dev->rx_sg_skb);
+		dev->rx_sg_skb = NULL;
+	}
+}
+
+static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
+				    int flags)
+{
+	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	dev->rx_skb[slot] = skb;
+	dev->rx_desc[slot].data_len = 0;
+
+	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+	dev->rx_desc[slot].data_ptr =
+	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
+			   DMA_FROM_DEVICE) + 2;
+	barrier();
+	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
+	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+
+	return 0;
+}
+
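emac_alloc_rx_skb() is careful about publication order: data_ptr and data_len are written first, then barrier(), and only then is the EMPTY (hardware-owned) bit set in ctrl, so the MAL never sees a half-initialized descriptor. A compiler-barrier sketch of that publish pattern; the descriptor layout and flag value are simplified stand-ins, not the real MAL structures:

    #include <stdint.h>

    /* Simplified stand-in for a MAL buffer descriptor */
    struct bd {
            volatile uint16_t ctrl;
            uint16_t data_len;
            uint32_t data_ptr;
    };

    #define BD_EMPTY 0x8000                         /* illustrative flag */
    #define barrier() __asm__ __volatile__("" ::: "memory")

    /* Publish a fresh buffer: fill in everything the hardware will read,
     * force the compiler to complete those stores, then flip ownership. */
    static void bd_publish(struct bd *d, uint32_t pa)
    {
            d->data_ptr = pa;
            d->data_len = 0;
            barrier();      /* order the stores before the EMPTY flag */
            d->ctrl = BD_EMPTY;
    }

    int main(void)
    {
            static struct bd ring[4];
            bd_publish(&ring[0], 0x1000);   /* hypothetical bus address */
            return 0;
    }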
-/*
- * This interrupt should be very rare at best. This occurs when
- * the hardware has a problem with the receive descriptors. The manual
- * states that it occurs when the hardware cannot use a receive descriptor
- * because its empty bit is not set. The recovery mechanism will be to
- * traverse through the descriptors, handle any that are marked to be
- * handled and reinitialize each along the way. At that point the driver
- * will be restarted.
- */
-static void emac_rxde_dev(void *param, u32 chanmask)
-{
-	struct net_device *dev = param;
-	struct ocp_enet_private *fep = dev->priv;
-	unsigned long flags;
-
-	if (net_ratelimit()) {
-		printk(KERN_WARNING "%s: receive descriptor error\n",
-		       fep->ndev->name);
-
-		emac_mac_dump(dev);
-		emac_mal_dump(dev);
-		emac_desc_dump(dev);
-	}
-
-	/* Disable RX channel */
-	spin_lock_irqsave(&fep->lock, flags);
-	mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
-
-	/* For now, charge the error against all emacs */
-	fep->stats.rx_errors++;
-
-	/* so do we have any good packets still? */
-	emac_rx_clean(dev);
-
-	/* When the interface is restarted it resets processing to the
-	 * first descriptor in the table.
-	 */
-
-	fep->rx_slot = 0;
-	emac_rx_fill(dev, 0);
-
-	set_mal_dcrn(fep->mal, DCRN_MALRXEOBISR, fep->commac.rx_chan_mask);
-	set_mal_dcrn(fep->mal, DCRN_MALRXDEIR, fep->commac.rx_chan_mask);
-
-	/* Reenable the receive channels */
-	mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
-	spin_unlock_irqrestore(&fep->lock, flags);
-}
-
-static irqreturn_t
-emac_mac_irq(int irq, void *dev_instance, struct pt_regs *regs)
-{
-	struct net_device *dev = dev_instance;
-	struct ocp_enet_private *fep = dev->priv;
-	emac_t *emacp = fep->emacp;
-	unsigned long tmp_em0isr;
-
-	/* EMAC interrupt */
-	tmp_em0isr = in_be32(&emacp->em0isr);
-	if (tmp_em0isr & (EMAC_ISR_TE0 | EMAC_ISR_TE1)) {
-		/* This error is a hard transmit error - could retransmit */
-		fep->stats.tx_errors++;
-
-		/* Reenable the transmit channel */
-		mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
-
-	} else {
-		fep->stats.rx_errors++;
-	}
-
-	if (tmp_em0isr & EMAC_ISR_RP)
-		fep->stats.rx_length_errors++;
-	if (tmp_em0isr & EMAC_ISR_ALE)
-		fep->stats.rx_frame_errors++;
-	if (tmp_em0isr & EMAC_ISR_BFCS)
-		fep->stats.rx_crc_errors++;
-	if (tmp_em0isr & EMAC_ISR_PTLE)
-		fep->stats.rx_length_errors++;
-	if (tmp_em0isr & EMAC_ISR_ORE)
-		fep->stats.rx_length_errors++;
-	if (tmp_em0isr & EMAC_ISR_TE0)
-		fep->stats.tx_aborted_errors++;
-
-	emac_err_dump(dev, tmp_em0isr);
-
-	out_be32(&emacp->em0isr, tmp_em0isr);
-
-	return IRQ_HANDLED;
-}
-
+static void emac_print_link_status(struct ocp_enet_private *dev)
+{
+	if (netif_carrier_ok(dev->ndev))
+		printk(KERN_INFO "%s: link is up, %d %s%s\n",
+		       dev->ndev->name, dev->phy.speed,
+		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
+		       dev->phy.pause ? ", pause enabled" :
+		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
+	else
+		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
+}
+
+/* Process ctx, rtnl_lock semaphore */
+static int emac_open(struct net_device *ndev)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	struct ocp_func_emac_data *emacdata = dev->def->additions;
+	int err, i;
+
+	DBG("%d: open" NL, dev->def->index);
+
+	/* Setup error IRQ handler */
+	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
+	if (err) {
+		printk(KERN_ERR "%s: failed to request IRQ %d\n",
+		       ndev->name, dev->def->irq);
+		return err;
+	}
+
+	/* Allocate RX ring */
+	for (i = 0; i < NUM_RX_BUFF; ++i)
+		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
+			printk(KERN_ERR "%s: failed to allocate RX ring\n",
+			       ndev->name);
+			goto oom;
+		}
+
+	local_bh_disable();
+	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
+	    dev->commac.rx_stopped = 0;
+	dev->rx_sg_skb = NULL;
+
+	if (dev->phy.address >= 0) {
+		int link_poll_interval;
+		if (dev->phy.def->ops->poll_link(&dev->phy)) {
+			dev->phy.def->ops->read_link(&dev->phy);
+			EMAC_RX_CLK_DEFAULT(dev->def->index);
+			netif_carrier_on(dev->ndev);
+			link_poll_interval = PHY_POLL_LINK_ON;
+		} else {
+			EMAC_RX_CLK_TX(dev->def->index);
+			netif_carrier_off(dev->ndev);
+			link_poll_interval = PHY_POLL_LINK_OFF;
+		}
+		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
+		emac_print_link_status(dev);
+	} else
+		netif_carrier_on(dev->ndev);
+
+	emac_configure(dev);
+	mal_poll_add(dev->mal, &dev->commac);
+	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
+	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
+	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
+	emac_tx_enable(dev);
+	emac_rx_enable(dev);
+	netif_start_queue(ndev);
+	local_bh_enable();
+
+	return 0;
+ oom:
+	emac_clean_rx_ring(dev);
+	free_irq(dev->def->irq, dev);
+	return -ENOMEM;
+}
+
+/* BHs disabled */
+static int emac_link_differs(struct ocp_enet_private *dev)
+{
+	u32 r = in_be32(&dev->emacp->mr1);
+
+	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
+	int speed, pause, asym_pause;
+
+	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
+		speed = SPEED_1000;
+	else if (r & EMAC_MR1_MF_100)
+		speed = SPEED_100;
+	else
+		speed = SPEED_10;
+
+	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
+	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
+		pause = 1;
+		asym_pause = 0;
+		break;
+	case EMAC_MR1_APP:
+		pause = 0;
+		asym_pause = 1;
+		break;
+	default:
+		pause = asym_pause = 0;
+	}
+	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
+	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
+}
+
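emac_link_differs() infers the currently programmed link parameters straight from MR1 instead of caching them. The decode structure can be exercised in isolation; the bit values below are made-up placeholders, only the decision logic mirrors the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bit assignments, for illustration only */
    #define MR1_FDE     0x1
    #define MR1_MF_100  0x2
    #define MR1_MF_1000 0x4

    static void decode(uint32_t r)
    {
            int speed = (r & MR1_MF_1000) ? 1000 : (r & MR1_MF_100) ? 100 : 10;
            printf("%d Mb/s, %s duplex\n", speed,
                   (r & MR1_FDE) ? "full" : "half");
    }

    int main(void)
    {
            decode(MR1_MF_1000 | MR1_FDE);  /* 1000 Mb/s, full duplex */
            decode(MR1_MF_100);             /* 100 Mb/s, half duplex */
            return 0;
    }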
-static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	unsigned short ctrl;
-	unsigned long flags;
-	struct ocp_enet_private *fep = dev->priv;
-	emac_t *emacp = fep->emacp;
-	int len = skb->len;
-	unsigned int offset = 0, size, f, tx_slot_first;
-	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
-
-	spin_lock_irqsave(&fep->lock, flags);
-
-	len -= skb->data_len;
-
-	if ((fep->tx_cnt + nr_frags + len / DESC_BUF_SIZE + 1) > NUM_TX_BUFF) {
-		PKT_DEBUG(("emac_start_xmit() stopping queue\n"));
-		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&fep->lock, flags);
-		return -EBUSY;
-	}
-
-	tx_slot_first = fep->tx_slot;
-
-	while (len) {
-		size = min(len, DESC_BUF_SIZE);
-
-		fep->tx_desc[fep->tx_slot].data_len = (short)size;
-		fep->tx_desc[fep->tx_slot].data_ptr =
-		    (unsigned char *)dma_map_single(&fep->ocpdev->dev,
-						    (void *)((unsigned int)skb->data + offset),
-						    size, DMA_TO_DEVICE);
-
-		ctrl = EMAC_TX_CTRL_DFLT;
-		if (fep->tx_slot != tx_slot_first)
-			ctrl |= MAL_TX_CTRL_READY;
-		if ((NUM_TX_BUFF - 1) == fep->tx_slot)
-			ctrl |= MAL_TX_CTRL_WRAP;
-		if (!nr_frags && (len == size)) {
-			ctrl |= MAL_TX_CTRL_LAST;
-			fep->tx_skb[fep->tx_slot] = skb;
-		}
-		if (skb->ip_summed == CHECKSUM_HW)
-			ctrl |= EMAC_TX_CTRL_TAH_CSUM;
-
-		fep->tx_desc[fep->tx_slot].ctrl = ctrl;
-
-		len -= size;
-		offset += size;
-
-		/* Bump tx count */
-		if (++fep->tx_cnt == NUM_TX_BUFF)
-			netif_stop_queue(dev);
-
-		/* Next descriptor */
-		if (++fep->tx_slot == NUM_TX_BUFF)
-			fep->tx_slot = 0;
-	}
-
-	for (f = 0; f < nr_frags; f++) {
-		struct skb_frag_struct *frag;
-
-		frag = &skb_shinfo(skb)->frags[f];
-		len = frag->size;
-		offset = 0;
-
-		while (len) {
-			size = min(len, DESC_BUF_SIZE);
-
-			dma_map_page(&fep->ocpdev->dev,
-				     frag->page,
-				     frag->page_offset + offset,
-				     size, DMA_TO_DEVICE);
-
-			ctrl = EMAC_TX_CTRL_DFLT | MAL_TX_CTRL_READY;
-			if ((NUM_TX_BUFF - 1) == fep->tx_slot)
-				ctrl |= MAL_TX_CTRL_WRAP;
-			if ((f == (nr_frags - 1)) && (len == size)) {
-				ctrl |= MAL_TX_CTRL_LAST;
-				fep->tx_skb[fep->tx_slot] = skb;
-			}
-
-			if (skb->ip_summed == CHECKSUM_HW)
-				ctrl |= EMAC_TX_CTRL_TAH_CSUM;
-
-			fep->tx_desc[fep->tx_slot].data_len = (short)size;
-			fep->tx_desc[fep->tx_slot].data_ptr =
-			    (char *)((page_to_pfn(frag->page) << PAGE_SHIFT) +
-				     frag->page_offset + offset);
-			fep->tx_desc[fep->tx_slot].ctrl = ctrl;
-
-			len -= size;
-			offset += size;
-
-			/* Bump tx count */
-			if (++fep->tx_cnt == NUM_TX_BUFF)
-				netif_stop_queue(dev);
-
-			/* Next descriptor */
-			if (++fep->tx_slot == NUM_TX_BUFF)
-				fep->tx_slot = 0;
-		}
-	}
-
-	/*
-	 * Deferred set READY on first descriptor of packet to
-	 * avoid TX MAL race.
-	 */
-	fep->tx_desc[tx_slot_first].ctrl |= MAL_TX_CTRL_READY;
-
-	/* Send the packet out. */
-	out_be32(&emacp->em0tmr0, EMAC_TMR0_XMIT);
-
-	fep->stats.tx_packets++;
-	fep->stats.tx_bytes += skb->len;
-
-	PKT_DEBUG(("emac_start_xmit() exit\n"));
-
-	spin_unlock_irqrestore(&fep->lock, flags);
-
-	return 0;
-}
-
+/* BHs disabled */
+static void emac_link_timer(unsigned long data)
+{
+	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
+	int link_poll_interval;
+
+	DBG2("%d: link timer" NL, dev->def->index);
+
+	if (dev->phy.def->ops->poll_link(&dev->phy)) {
+		if (!netif_carrier_ok(dev->ndev)) {
+			EMAC_RX_CLK_DEFAULT(dev->def->index);
+
+			/* Get new link parameters */
+			dev->phy.def->ops->read_link(&dev->phy);
+
+			if (dev->tah_dev || emac_link_differs(dev))
+				emac_full_tx_reset(dev->ndev);
+
+			netif_carrier_on(dev->ndev);
+			emac_print_link_status(dev);
+		}
+		link_poll_interval = PHY_POLL_LINK_ON;
+	} else {
+		if (netif_carrier_ok(dev->ndev)) {
+			EMAC_RX_CLK_TX(dev->def->index);
+#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
+			emac_reinitialize(dev);
+#endif
+			netif_carrier_off(dev->ndev);
+			emac_print_link_status(dev);
+		}
+
+		/* Retry reset if the previous attempt failed.
+		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
+		 * case, but I left it here because it shouldn't trigger for
+		 * sane PHYs anyway.
+		 */
+		if (unlikely(dev->reset_failed))
+			emac_reinitialize(dev);
+
+		link_poll_interval = PHY_POLL_LINK_OFF;
+	}
+	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
+}
+
+/* BHs disabled */
+static void emac_force_link_update(struct ocp_enet_private *dev)
+{
+	netif_carrier_off(dev->ndev);
+	if (timer_pending(&dev->link_timer))
+		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
+}
+
+/* Process ctx, rtnl_lock semaphore */
+static int emac_close(struct net_device *ndev)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	struct ocp_func_emac_data *emacdata = dev->def->additions;
+
+	DBG("%d: close" NL, dev->def->index);
+
+	local_bh_disable();
+
+	if (dev->phy.address >= 0)
+		del_timer_sync(&dev->link_timer);
+
+	netif_stop_queue(ndev);
+	emac_rx_disable(dev);
+	emac_tx_disable(dev);
+	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
+	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
+	mal_poll_del(dev->mal, &dev->commac);
+	local_bh_enable();
+
+	emac_clean_tx_ring(dev);
+	emac_clean_rx_ring(dev);
+	free_irq(dev->def->irq, dev);
+
+	return 0;
+}
+
+static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
+			       struct sk_buff *skb)
+{
+#if defined(CONFIG_IBM_EMAC_TAH)
+	if (skb->ip_summed == CHECKSUM_HW) {
+		++dev->stats.tx_packets_csum;
+		return EMAC_TX_CTRL_TAH_CSUM;
+	}
+#endif
+	return 0;
+}
+
+static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
+{
+	struct emac_regs *p = dev->emacp;
+	struct net_device *ndev = dev->ndev;
+
+	/* Send the packet out */
+	out_be32(&p->tmr0, EMAC_TMR0_XMIT);
+
+	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
+		netif_stop_queue(ndev);
+		DBG2("%d: stopped TX queue" NL, dev->def->index);
+	}
+
+	ndev->trans_start = jiffies;
+	++dev->stats.tx_packets;
+	dev->stats.tx_bytes += len;
+
+	return 0;
+}
+
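The fast-path emac_start_xmit() that follows fills exactly one descriptor: it takes the current slot, advances tx_slot with wraparound, and sets MAL_TX_CTRL_WRAP when handing out the last slot of the ring so the DMA engine loops back to slot 0. The same ring arithmetic in miniature; the ring size and flag value are illustrative, not the driver's:

    #include <stdio.h>

    #define NUM_TX_BUFF 8           /* illustrative ring size */
    #define CTRL_WRAP   0x0200      /* illustrative WRAP flag */

    static int tx_slot;

    /* Claim one descriptor slot; tell the caller whether this slot needs
     * the WRAP flag. */
    static int claim_slot(unsigned short *ctrl)
    {
            int slot = tx_slot++;
            if (tx_slot == NUM_TX_BUFF) {
                    tx_slot = 0;
                    *ctrl |= CTRL_WRAP;
            }
            return slot;
    }

    int main(void)
    {
            int i;
            for (i = 0; i < 10; i++) {
                    unsigned short ctrl = 0;
                    int slot = claim_slot(&ctrl);
                    printf("slot %d%s\n", slot,
                           (ctrl & CTRL_WRAP) ? " (wrap)" : "");
            }
            return 0;
    }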
-static int emac_adjust_to_link(struct ocp_enet_private *fep)
-{
-	emac_t *emacp = fep->emacp;
-	unsigned long mode_reg;
-	int full_duplex, speed;
-
-	full_duplex = 0;
-	speed = SPEED_10;
-
-	/* set mode register 1 defaults */
-	mode_reg = EMAC_M1_DEFAULT;
-
-	/* Read link mode on PHY */
-	if (fep->phy_mii.def->ops->read_link(&fep->phy_mii) == 0) {
-		/* If an error occurred, we don't deal with it yet */
-		full_duplex = (fep->phy_mii.duplex == DUPLEX_FULL);
-		speed = fep->phy_mii.speed;
-	}
-
-	/* set speed (default is 10Mb) */
-	switch (speed) {
-	case SPEED_1000:
-		mode_reg |= EMAC_M1_RFS_16K;
-		if (fep->rgmii_dev) {
-			struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(fep->rgmii_dev);
-
-			if ((rgmii->mode[fep->rgmii_input] == RTBI)
-			    || (rgmii->mode[fep->rgmii_input] == TBI))
-				mode_reg |= EMAC_M1_MF_1000GPCS;
-			else
-				mode_reg |= EMAC_M1_MF_1000MBPS;
-
-			emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
-					      1000);
-		}
-		break;
-	case SPEED_100:
-		mode_reg |= EMAC_M1_MF_100MBPS | EMAC_M1_RFS_4K;
-		if (fep->rgmii_dev)
-			emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
-					      100);
-		if (fep->zmii_dev)
-			emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
-					     100);
-		break;
-	case SPEED_10:
-	default:
-		mode_reg = (mode_reg & ~EMAC_M1_MF_100MBPS) | EMAC_M1_RFS_4K;
-		if (fep->rgmii_dev)
-			emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
-					      10);
-		if (fep->zmii_dev)
-			emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
-					     10);
-	}
-
-	if (full_duplex)
-		mode_reg |= EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_IST;
-	else
-		mode_reg &= ~(EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_ILE);
-
-	LINK_DEBUG(("%s: adjust to link, speed: %d, duplex: %d, opened: %d\n",
-		    fep->ndev->name, speed, full_duplex, fep->opened));
-
-	printk(KERN_INFO "%s: Speed: %d, %s duplex.\n",
-	       fep->ndev->name, speed, full_duplex ? "Full" : "Half");
-	if (fep->opened)
-		out_be32(&emacp->em0mr1, mode_reg);
-
-	return 0;
-}
-
-static int emac_set_mac_address(struct net_device *ndev, void *p)
-{
-	struct ocp_enet_private *fep = ndev->priv;
-	emac_t *emacp = fep->emacp;
-	struct sockaddr *addr = p;
-
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EADDRNOTAVAIL;
-
-	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
-
-	/* set the high address */
-	out_be32(&emacp->em0iahr,
-		 (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]);
-
-	/* set the low address */
-	out_be32(&emacp->em0ialr,
-		 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
-		 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
-
-	return 0;
-}
-
+/* BHs disabled */
+static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	unsigned int len = skb->len;
+	int slot;
+
+	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
+	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
+
+	slot = dev->tx_slot++;
+	if (dev->tx_slot == NUM_TX_BUFF) {
+		dev->tx_slot = 0;
+		ctrl |= MAL_TX_CTRL_WRAP;
+	}
+
+	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);
+
+	dev->tx_skb[slot] = skb;
+	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
+						     DMA_TO_DEVICE);
+	dev->tx_desc[slot].data_len = (u16) len;
+	barrier();
+	dev->tx_desc[slot].ctrl = ctrl;
+
+	return emac_xmit_finish(dev, len);
+}
+
+#if defined(CONFIG_IBM_EMAC_TAH)
+static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
+				  u32 pd, int len, int last, u16 base_ctrl)
+{
+	while (1) {
+		u16 ctrl = base_ctrl;
+		int chunk = min(len, MAL_MAX_TX_SIZE);
+		len -= chunk;
+
+		slot = (slot + 1) % NUM_TX_BUFF;
+
+		if (last && !len)
+			ctrl |= MAL_TX_CTRL_LAST;
+		if (slot == NUM_TX_BUFF - 1)
+			ctrl |= MAL_TX_CTRL_WRAP;
+
+		dev->tx_skb[slot] = NULL;
+		dev->tx_desc[slot].data_ptr = pd;
+		dev->tx_desc[slot].data_len = (u16) chunk;
+		dev->tx_desc[slot].ctrl = ctrl;
+		++dev->tx_cnt;
+
+		if (!len)
+			break;
+
+		pd += chunk;
+	}
+	return slot;
+}
+
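emac_xmit_split() slices a buffer into MAL_MAX_TX_SIZE-sized descriptors, and the driver's mal_tx_chunks() estimate used below is the usual ceiling division. A quick check of that slicing; the 4080-byte per-descriptor limit is an assumption for illustration, the real value comes from the MAL header:

    #include <stdio.h>

    #define MAX_TX_SIZE 4080        /* assumed per-descriptor limit */

    /* Ceiling division: descriptors needed for a buffer of 'len' bytes */
    static int tx_chunks(int len)
    {
            return (len + MAX_TX_SIZE - 1) / MAX_TX_SIZE;
    }

    int main(void)
    {
            int lens[] = { 60, 4080, 4081, 9000 };
            int i;
            for (i = 0; i < 4; i++)
                    printf("len %5d -> %d chunk(s)\n",
                           lens[i], tx_chunks(lens[i]));
            return 0;       /* 1, 1, 2, 3 */
    }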
-static int emac_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct ocp_enet_private *fep = dev->priv;
-	int old_mtu = dev->mtu;
-	unsigned long mode_reg;
-	emac_t *emacp = fep->emacp;
-	u32 em0mr0;
-	int i, full;
-	unsigned long flags;
-
-	if ((new_mtu < EMAC_MIN_MTU) || (new_mtu > EMAC_MAX_MTU)) {
-		printk(KERN_ERR
-		       "emac: Invalid MTU setting, MTU must be between %d and %d\n",
-		       EMAC_MIN_MTU, EMAC_MAX_MTU);
-		return -EINVAL;
-	}
-
-	if (old_mtu != new_mtu && netif_running(dev)) {
-		/* Stop rx engine */
-		em0mr0 = in_be32(&emacp->em0mr0);
-		out_be32(&emacp->em0mr0, em0mr0 & ~EMAC_M0_RXE);
-
-		/* Wait for descriptors to be empty */
-		do {
-			full = 0;
-			for (i = 0; i < NUM_RX_BUFF; i++)
-				if (!(fep->rx_desc[i].ctrl & MAL_RX_CTRL_EMPTY)) {
-					printk(KERN_NOTICE
-					       "emac: RX ring is still full\n");
-					full = 1;
-				}
-		} while (full);
-
-		spin_lock_irqsave(&fep->lock, flags);
-
-		mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
-
-		/* Destroy all old rx skbs */
-		for (i = 0; i < NUM_RX_BUFF; i++) {
-			dma_unmap_single(&fep->ocpdev->dev,
-					 fep->rx_desc[i].data_ptr,
-					 fep->rx_desc[i].data_len,
-					 DMA_FROM_DEVICE);
-			dev_kfree_skb(fep->rx_skb[i]);
-			fep->rx_skb[i] = NULL;
-		}
-
-		/* Set new rx_buffer_size, jumbo cap, and advertise new mtu */
-		mode_reg = in_be32(&emacp->em0mr1);
-		if (new_mtu > ENET_DEF_MTU_SIZE) {
-			mode_reg |= EMAC_M1_JUMBO_ENABLE;
-			fep->rx_buffer_size = EMAC_MAX_FRAME;
-		} else {
-			mode_reg &= ~EMAC_M1_JUMBO_ENABLE;
-			fep->rx_buffer_size = ENET_DEF_BUF_SIZE;
-		}
-		dev->mtu = new_mtu;
-		out_be32(&emacp->em0mr1, mode_reg);
-
-		/* Re-init rx skbs */
-		fep->rx_slot = 0;
-		emac_rx_fill(dev, 0);
-
-		/* Restart the rx engine */
-		mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
-		out_be32(&emacp->em0mr0, em0mr0 | EMAC_M0_RXE);
-
-		spin_unlock_irqrestore(&fep->lock, flags);
-	}
-
-	return 0;
-}
-
-static void __emac_set_multicast_list(struct net_device *dev)
-{
-	struct ocp_enet_private *fep = dev->priv;
-	emac_t *emacp = fep->emacp;
-	u32 rmr = in_be32(&emacp->em0rmr);
-
-	/* First clear all special bits, they can be set later */
-	rmr &= ~(EMAC_RMR_PME | EMAC_RMR_PMME | EMAC_RMR_MAE);
-
-	if (dev->flags & IFF_PROMISC) {
-		rmr |= EMAC_RMR_PME;
-	} else if (dev->flags & IFF_ALLMULTI || 32 < dev->mc_count) {
-		/*
-		 * Must be setting up to use multicast
-		 * Now check for promiscuous multicast
-		 */
-		rmr |= EMAC_RMR_PMME;
-	} else if (dev->flags & IFF_MULTICAST && 0 < dev->mc_count) {
-		unsigned short em0gaht[4] = { 0, 0, 0, 0 };
-		struct dev_mc_list *dmi;
-
-		/* Need to hash on the multicast address. */
-		for (dmi = dev->mc_list; dmi; dmi = dmi->next) {
-			unsigned long mc_crc;
-			unsigned int bit_number;
-
-			mc_crc = ether_crc(6, (char *)dmi->dmi_addr);
-			bit_number = 63 - (mc_crc >> 26);	/* MSB: 0 LSB: 63 */
-			em0gaht[bit_number >> 4] |=
-			    0x8000 >> (bit_number & 0x0f);
-		}
-		emacp->em0gaht1 = em0gaht[0];
-		emacp->em0gaht2 = em0gaht[1];
-		emacp->em0gaht3 = em0gaht[2];
-		emacp->em0gaht4 = em0gaht[3];
-
-		/* Turn on multicast addressing */
-		rmr |= EMAC_RMR_MAE;
-	}
-	out_be32(&emacp->em0rmr, rmr);
-}
-
+/* BHs disabled (SG version for TAH equipped EMACs) */
+static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	int len = skb->len, chunk;
+	int slot, i;
+	u16 ctrl;
+	u32 pd;
+
+	/* This is common "fast" path */
+	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
+		return emac_start_xmit(skb, ndev);
+
+	len -= skb->data_len;
+
+	/* Note, this is only an *estimation*, we can still run out of empty
+	 * slots because of the additional fragmentation into
+	 * MAL_MAX_TX_SIZE-sized chunks
+	 */
+	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
+		goto stop_queue;
+
+	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
+	    emac_tx_csum(dev, skb);
+	slot = dev->tx_slot;
+
+	/* skb data */
+	dev->tx_skb[slot] = NULL;
+	chunk = min(len, MAL_MAX_TX_SIZE);
+	dev->tx_desc[slot].data_ptr = pd =
+	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
+	dev->tx_desc[slot].data_len = (u16) chunk;
+	len -= chunk;
+	if (unlikely(len))
+		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
+				       ctrl);
+	/* skb fragments */
+	for (i = 0; i < nr_frags; ++i) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		len = frag->size;
+
+		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
+			goto undo_frame;
+
+		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
+				  DMA_TO_DEVICE);
+
+		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
+				       ctrl);
+	}
+
+	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
+	     dev->tx_slot, slot);
+
+	/* Attach skb to the last slot so we don't release it too early */
+	dev->tx_skb[slot] = skb;
+
+	/* Send the packet out */
+	if (dev->tx_slot == NUM_TX_BUFF - 1)
+		ctrl |= MAL_TX_CTRL_WRAP;
+	barrier();
+	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
+	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
+
+	return emac_xmit_finish(dev, skb->len);
+
+ undo_frame:
+	/* Well, too bad. Our previous estimation was overly optimistic.
+	 * Undo everything.
+	 */
+	while (slot != dev->tx_slot) {
+		dev->tx_desc[slot].ctrl = 0;
+		--dev->tx_cnt;
+		if (--slot < 0)
+			slot = NUM_TX_BUFF - 1;
+	}
+	++dev->estats.tx_undo;
+
+ stop_queue:
+	netif_stop_queue(ndev);
+	DBG2("%d: stopped TX queue" NL, dev->def->index);
+	return 1;
+}
+#else
+# define emac_start_xmit_sg emac_start_xmit
+#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */
+
1232static int emac_init_tah(struct ocp_enet_private *fep) 1193/* BHs disabled */
1194static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
1233{ 1195{
1234 tah_t *tahp; 1196 struct ibm_emac_error_stats *st = &dev->estats;
1197 DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
1198
1199 ++st->tx_bd_errors;
1200 if (ctrl & EMAC_TX_ST_BFCS)
1201 ++st->tx_bd_bad_fcs;
1202 if (ctrl & EMAC_TX_ST_LCS)
1203 ++st->tx_bd_carrier_loss;
1204 if (ctrl & EMAC_TX_ST_ED)
1205 ++st->tx_bd_excessive_deferral;
1206 if (ctrl & EMAC_TX_ST_EC)
1207 ++st->tx_bd_excessive_collisions;
1208 if (ctrl & EMAC_TX_ST_LC)
1209 ++st->tx_bd_late_collision;
1210 if (ctrl & EMAC_TX_ST_MC)
1211 ++st->tx_bd_multple_collisions;
1212 if (ctrl & EMAC_TX_ST_SC)
1213 ++st->tx_bd_single_collision;
1214 if (ctrl & EMAC_TX_ST_UR)
1215 ++st->tx_bd_underrun;
1216 if (ctrl & EMAC_TX_ST_SQE)
1217 ++st->tx_bd_sqe;
1218}
1235 1219
1236 /* Initialize TAH and enable checksum verification */ 1220static void emac_poll_tx(void *param)
1237 tahp = (tah_t *) ioremap(fep->tah_dev->def->paddr, sizeof(*tahp)); 1221{
1222 struct ocp_enet_private *dev = param;
1223 DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
1224 dev->ack_slot);
1225
1226 if (dev->tx_cnt) {
1227 u16 ctrl;
1228 int slot = dev->ack_slot, n = 0;
1229 again:
1230 ctrl = dev->tx_desc[slot].ctrl;
1231 if (!(ctrl & MAL_TX_CTRL_READY)) {
1232 struct sk_buff *skb = dev->tx_skb[slot];
1233 ++n;
1234
1235 if (skb) {
1236 dev_kfree_skb(skb);
1237 dev->tx_skb[slot] = NULL;
1238 }
1239 slot = (slot + 1) % NUM_TX_BUFF;
1238 1240
1239 if (tahp == NULL) { 1241 if (unlikely(EMAC_IS_BAD_TX(ctrl)))
1240 printk(KERN_ERR "tah%d: Cannot ioremap TAH registers!\n", 1242 emac_parse_tx_error(dev, ctrl);
1241 fep->tah_dev->def->index);
1242 1243
1243 return -ENOMEM; 1244 if (--dev->tx_cnt)
1244 } 1245 goto again;
1245 1246 }
1246 out_be32(&tahp->tah_mr, TAH_MR_SR); 1247 if (n) {
1248 dev->ack_slot = slot;
1249 if (netif_queue_stopped(dev->ndev) &&
1250 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1251 netif_wake_queue(dev->ndev);
1247 1252
1248 /* wait for reset to complete */ 1253 DBG2("%d: tx %d pkts" NL, dev->def->index, n);
1249 while (in_be32(&tahp->tah_mr) & TAH_MR_SR) ; 1254 }
1255 }
1256}
1250 1257
1251 /* 10KB TAH TX FIFO accomodates the max MTU of 9000 */ 1258static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
1252 out_be32(&tahp->tah_mr, 1259 int len)
1253 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP | 1260{
1254 TAH_MR_DIG); 1261 struct sk_buff *skb = dev->rx_skb[slot];
1262 DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);
1255 1263
1256 iounmap(tahp); 1264 if (len)
1265 dma_map_single(dev->ldev, skb->data - 2,
1266 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1257 1267
1258 return 0; 1268 dev->rx_desc[slot].data_len = 0;
1269 barrier();
1270 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1271 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1259} 1272}
-
-static void emac_init_rings(struct net_device *dev)
-{
-	struct ocp_enet_private *ep = dev->priv;
-	int loop;
-
-	ep->tx_desc = (struct mal_descriptor *)((char *)ep->mal->tx_virt_addr +
-						(ep->mal_tx_chan *
-						 MAL_DT_ALIGN));
-	ep->rx_desc =
-	    (struct mal_descriptor *)((char *)ep->mal->rx_virt_addr +
-				      (ep->mal_rx_chan * MAL_DT_ALIGN));
-
-	/* Fill in the transmit descriptor ring. */
-	for (loop = 0; loop < NUM_TX_BUFF; loop++) {
-		if (ep->tx_skb[loop]) {
-			dma_unmap_single(&ep->ocpdev->dev,
-					 ep->tx_desc[loop].data_ptr,
-					 ep->tx_desc[loop].data_len,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb_irq(ep->tx_skb[loop]);
-		}
-		ep->tx_skb[loop] = NULL;
-		ep->tx_desc[loop].ctrl = 0;
-		ep->tx_desc[loop].data_len = 0;
-		ep->tx_desc[loop].data_ptr = NULL;
-	}
-	ep->tx_desc[loop - 1].ctrl |= MAL_TX_CTRL_WRAP;
-
-	/* Format the receive descriptor ring. */
-	ep->rx_slot = 0;
-	/* Default is MTU=1500 + Ethernet overhead */
-	ep->rx_buffer_size = dev->mtu + ENET_HEADER_SIZE + ENET_FCS_SIZE;
-	emac_rx_fill(dev, 0);
-	if (ep->rx_slot != 0) {
-		printk(KERN_ERR
-		       "%s: Not enough mem for RxChain during Open?\n",
-		       dev->name);
-		/* We couldn't fill the ring at startup?
-		 * We could clean up and fail to open, but right now we will
-		 * try to carry on. It may be a sign of a bad NUM_RX_BUFF
-		 * value.
-		 */
-	}
-
-	ep->tx_cnt = 0;
-	ep->tx_slot = 0;
-	ep->ack_slot = 0;
-}
+
+static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
+{
+	struct ibm_emac_error_stats *st = &dev->estats;
+	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
+
+	++st->rx_bd_errors;
+	if (ctrl & EMAC_RX_ST_OE)
+		++st->rx_bd_overrun;
+	if (ctrl & EMAC_RX_ST_BP)
+		++st->rx_bd_bad_packet;
+	if (ctrl & EMAC_RX_ST_RP)
+		++st->rx_bd_runt_packet;
+	if (ctrl & EMAC_RX_ST_SE)
+		++st->rx_bd_short_event;
+	if (ctrl & EMAC_RX_ST_AE)
+		++st->rx_bd_alignment_error;
+	if (ctrl & EMAC_RX_ST_BFCS)
+		++st->rx_bd_bad_fcs;
+	if (ctrl & EMAC_RX_ST_PTL)
+		++st->rx_bd_packet_too_long;
+	if (ctrl & EMAC_RX_ST_ORE)
+		++st->rx_bd_out_of_range;
+	if (ctrl & EMAC_RX_ST_IRE)
+		++st->rx_bd_in_range;
+}
+
+static inline void emac_rx_csum(struct ocp_enet_private *dev,
+				struct sk_buff *skb, u16 ctrl)
+{
+#if defined(CONFIG_IBM_EMAC_TAH)
+	if (!ctrl && dev->tah_dev) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		++dev->stats.rx_packets_csum;
+	}
+#endif
+}
+
+static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
+{
+	if (likely(dev->rx_sg_skb != NULL)) {
+		int len = dev->rx_desc[slot].data_len;
+		int tot_len = dev->rx_sg_skb->len + len;
+
+		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
+			++dev->estats.rx_dropped_mtu;
+			dev_kfree_skb(dev->rx_sg_skb);
+			dev->rx_sg_skb = NULL;
+		} else {
+			cacheable_memcpy(dev->rx_sg_skb->tail,
+					 dev->rx_skb[slot]->data, len);
+			skb_put(dev->rx_sg_skb, len);
+			emac_recycle_rx_skb(dev, slot, len);
+			return 0;
+		}
+	}
+	emac_recycle_rx_skb(dev, slot, 0);
+	return -1;
+}
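
[Editor's sketch, not part of the patch: the scatter-gather receive path added above stitches one frame out of several MAL descriptors, with FIRST opening the reassembly and LAST closing it. The stand-alone C model below illustrates that handshake; the MAL_RX_CTRL_* values, the ring size, and the "hardware" contents are all made up for the example.]

#include <stdio.h>

#define NUM_RX_BUFF        8
#define MAL_RX_CTRL_EMPTY  0x01	/* owned by hardware, nothing to consume */
#define MAL_RX_CTRL_FIRST  0x02	/* first fragment of a frame */
#define MAL_RX_CTRL_LAST   0x04	/* last fragment of a frame */

struct rx_desc {
	unsigned short ctrl;
	int data_len;
};

int main(void)
{
	/* A 3-descriptor frame (100 + 100 + 50 bytes) followed by an
	 * empty slot, as the hardware would leave the ring. */
	struct rx_desc ring[NUM_RX_BUFF] = {
		{ MAL_RX_CTRL_FIRST, 100 },
		{ 0, 100 },
		{ MAL_RX_CTRL_LAST, 50 },
		{ MAL_RX_CTRL_EMPTY, 0 },
	};
	int slot = 0, frame_len = 0;

	while (!(ring[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
		if (ring[slot].ctrl & MAL_RX_CTRL_FIRST)
			frame_len = 0;			/* start reassembly */
		frame_len += ring[slot].data_len;	/* append fragment */
		if (ring[slot].ctrl & MAL_RX_CTRL_LAST)
			printf("frame complete: %d bytes\n", frame_len);

		/* recycle: hand the slot back to "hardware" */
		ring[slot].ctrl = MAL_RX_CTRL_EMPTY;
		slot = (slot + 1) % NUM_RX_BUFF;	/* ring wrap */
	}
	return 0;
}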
1308 1332
1309static void emac_reset_configure(struct ocp_enet_private *fep) 1333/* BHs disabled */
1334static int emac_poll_rx(void *param, int budget)
1310{ 1335{
1311 emac_t *emacp = fep->emacp; 1336 struct ocp_enet_private *dev = param;
1312 int i; 1337 int slot = dev->rx_slot, received = 0;
1313
1314 mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
1315 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
1316 1338
1317 /* 1339 DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);
1318 * Check for a link, some PHYs don't provide a clock if
1319 * no link is present. Some EMACs will not come out of
1320 * soft reset without a PHY clock present.
1321 */
1322 if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
1323 /* Reset the EMAC */
1324 out_be32(&emacp->em0mr0, EMAC_M0_SRST);
1325 udelay(20);
1326 for (i = 0; i < 100; i++) {
1327 if ((in_be32(&emacp->em0mr0) & EMAC_M0_SRST) == 0)
1328 break;
1329 udelay(10);
1330 }
1331
1332 if (i >= 100) {
1333 printk(KERN_ERR "%s: Cannot reset EMAC\n",
1334 fep->ndev->name);
1335 return;
1336 }
1337 }
1338 1340
1339 /* Switch IRQs off for now */ 1341 again:
1340 out_be32(&emacp->em0iser, 0); 1342 while (budget > 0) {
1343 int len;
1344 struct sk_buff *skb;
1345 u16 ctrl = dev->rx_desc[slot].ctrl;
1341 1346
1342 /* Configure MAL rx channel */ 1347 if (ctrl & MAL_RX_CTRL_EMPTY)
1343 mal_set_rcbs(fep->mal, fep->mal_rx_chan, DESC_BUF_SIZE_REG); 1348 break;
1344 1349
1345 /* set the high address */ 1350 skb = dev->rx_skb[slot];
1346 out_be32(&emacp->em0iahr, 1351 barrier();
1347 (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]); 1352 len = dev->rx_desc[slot].data_len;
1348 1353
1349 /* set the low address */ 1354 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1350 out_be32(&emacp->em0ialr, 1355 goto sg;
1351 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
1352 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
1353 1356
1354 /* Adjust to link */ 1357 ctrl &= EMAC_BAD_RX_MASK;
1355 if (netif_carrier_ok(fep->ndev)) 1358 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1356 emac_adjust_to_link(fep); 1359 emac_parse_rx_error(dev, ctrl);
1360 ++dev->estats.rx_dropped_error;
1361 emac_recycle_rx_skb(dev, slot, 0);
1362 len = 0;
1363 goto next;
1364 }
1357 1365
1358 /* enable broadcast/individual address and RX FIFO defaults */ 1366 if (len && len < EMAC_RX_COPY_THRESH) {
1359 out_be32(&emacp->em0rmr, EMAC_RMR_DEFAULT); 1367 struct sk_buff *copy_skb =
1368 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1369 if (unlikely(!copy_skb))
1370 goto oom;
1371
1372 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1373 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1374 len + 2);
1375 emac_recycle_rx_skb(dev, slot, len);
1376 skb = copy_skb;
1377 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1378 goto oom;
1379
1380 skb_put(skb, len);
1381 push_packet:
1382 skb->dev = dev->ndev;
1383 skb->protocol = eth_type_trans(skb, dev->ndev);
1384 emac_rx_csum(dev, skb, ctrl);
1385
1386 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1387 ++dev->estats.rx_dropped_stack;
1388 next:
1389 ++dev->stats.rx_packets;
1390 skip:
1391 dev->stats.rx_bytes += len;
1392 slot = (slot + 1) % NUM_RX_BUFF;
1393 --budget;
1394 ++received;
1395 continue;
1396 sg:
1397 if (ctrl & MAL_RX_CTRL_FIRST) {
1398 BUG_ON(dev->rx_sg_skb);
1399 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1400 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1401 ++dev->estats.rx_dropped_oom;
1402 emac_recycle_rx_skb(dev, slot, 0);
1403 } else {
1404 dev->rx_sg_skb = skb;
1405 skb_put(skb, len);
1406 }
1407 } else if (!emac_rx_sg_append(dev, slot) &&
1408 (ctrl & MAL_RX_CTRL_LAST)) {
1409
1410 skb = dev->rx_sg_skb;
1411 dev->rx_sg_skb = NULL;
1412
1413 ctrl &= EMAC_BAD_RX_MASK;
1414 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1415 emac_parse_rx_error(dev, ctrl);
1416 ++dev->estats.rx_dropped_error;
1417 dev_kfree_skb(skb);
1418 len = 0;
1419 } else
1420 goto push_packet;
1421 }
1422 goto skip;
1423 oom:
1424 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1425 /* Drop the packet and recycle skb */
1426 ++dev->estats.rx_dropped_oom;
1427 emac_recycle_rx_skb(dev, slot, 0);
1428 goto next;
1429 }
1360 1430
1361 /* set transmit request threshold register */ 1431 if (received) {
1362 out_be32(&emacp->em0trtr, EMAC_TRTR_DEFAULT); 1432 DBG2("%d: rx %d BDs" NL, dev->def->index, received);
1433 dev->rx_slot = slot;
1434 }
1363 1435
1364 /* Reconfigure multicast */ 1436 if (unlikely(budget && dev->commac.rx_stopped)) {
1365 __emac_set_multicast_list(fep->ndev); 1437 struct ocp_func_emac_data *emacdata = dev->def->additions;
1366 1438
1367 /* Set receiver/transmitter defaults */ 1439 barrier();
1368 out_be32(&emacp->em0rwmr, EMAC_RWMR_DEFAULT); 1440 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1369 out_be32(&emacp->em0tmr0, EMAC_TMR0_DEFAULT); 1441 DBG2("%d: rx restart" NL, dev->def->index);
1370 out_be32(&emacp->em0tmr1, EMAC_TMR1_DEFAULT); 1442 received = 0;
1443 goto again;
1444 }
1371 1445
1372 /* set frame gap */ 1446 if (dev->rx_sg_skb) {
1373 out_be32(&emacp->em0ipgvr, CONFIG_IBM_EMAC_FGAP); 1447 DBG2("%d: dropping partial rx packet" NL,
1374 1448 dev->def->index);
1375 /* set VLAN Tag Protocol Identifier */ 1449 ++dev->estats.rx_dropped_error;
1376 out_be32(&emacp->em0vtpid, 0x8100); 1450 dev_kfree_skb(dev->rx_sg_skb);
1451 dev->rx_sg_skb = NULL;
1452 }
1377 1453
1378 /* Init ring buffers */ 1454 dev->commac.rx_stopped = 0;
1379 emac_init_rings(fep->ndev); 1455 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
1456 emac_rx_enable(dev);
1457 dev->rx_slot = 0;
1458 }
1459 return received;
1380} 1460}
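
[Editor's note, not part of the patch: emac_poll_rx() above uses a classic copy-break scheme — packets shorter than EMAC_RX_COPY_THRESH are memcpy'd into a small freshly allocated skb so the large DMA-mapped ring buffer can be recycled in place, while large packets hand the ring buffer up the stack and allocate a replacement. Distilled, under the assumption of a placeholder threshold (256 is not the driver's actual EMAC_RX_COPY_THRESH):]

#define RX_COPY_THRESH 256	/* placeholder value for illustration */

/* Returns nonzero when it is cheaper to copy a small packet into a new
 * buffer and keep the big DMA buffer on the ring, instead of detaching
 * the big buffer and allocating a new one. */
static inline int rx_should_copy(int len)
{
	return len && len < RX_COPY_THRESH;
}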
-
-static void emac_kick(struct ocp_enet_private *fep)
-{
-	emac_t *emacp = fep->emacp;
-	unsigned long emac_ier;
-
-	emac_ier = EMAC_ISR_PP | EMAC_ISR_BP | EMAC_ISR_RP |
-	    EMAC_ISR_SE | EMAC_ISR_PTLE | EMAC_ISR_ALE |
-	    EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
-
-	out_be32(&emacp->em0iser, emac_ier);
-
-	/* enable all MAL transmit and receive channels */
-	mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
-	mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
-
-	/* set transmit and receive enable */
-	out_be32(&emacp->em0mr0, EMAC_M0_TXE | EMAC_M0_RXE);
-}
+
+/* BHs disabled */
+static int emac_peek_rx(void *param)
+{
+	struct ocp_enet_private *dev = param;
+	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
+}
+
+/* BHs disabled */
+static int emac_peek_rx_sg(void *param)
+{
+	struct ocp_enet_private *dev = param;
+	int slot = dev->rx_slot;
+	while (1) {
+		u16 ctrl = dev->rx_desc[slot].ctrl;
+		if (ctrl & MAL_RX_CTRL_EMPTY)
+			return 0;
+		else if (ctrl & MAL_RX_CTRL_LAST)
+			return 1;
+
+		slot = (slot + 1) % NUM_RX_BUFF;
+
+		/* I'm just being paranoid here :) */
+		if (unlikely(slot == dev->rx_slot))
+			return 0;
+	}
+}
1400 1488
1401static void 1489/* Hard IRQ */
1402emac_start_link(struct ocp_enet_private *fep, struct ethtool_cmd *ep) 1490static void emac_rxde(void *param)
1403{ 1491{
1404 u32 advertise; 1492 struct ocp_enet_private *dev = param;
1405 int autoneg; 1493 ++dev->estats.rx_stopped;
1406 int forced_speed; 1494 emac_rx_disable_async(dev);
1407 int forced_duplex; 1495}
1408 1496
1409 /* Default advertise */ 1497/* Hard IRQ */
1410 advertise = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 1498static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
1411 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 1499{
1412 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full; 1500 struct ocp_enet_private *dev = dev_instance;
1413 autoneg = fep->want_autoneg; 1501 struct emac_regs *p = dev->emacp;
1414 forced_speed = fep->phy_mii.speed; 1502 struct ibm_emac_error_stats *st = &dev->estats;
1415 forced_duplex = fep->phy_mii.duplex; 1503
1504 u32 isr = in_be32(&p->isr);
1505 out_be32(&p->isr, isr);
1506
1507 DBG("%d: isr = %08x" NL, dev->def->index, isr);
1508
1509 if (isr & EMAC_ISR_TXPE)
1510 ++st->tx_parity;
1511 if (isr & EMAC_ISR_RXPE)
1512 ++st->rx_parity;
1513 if (isr & EMAC_ISR_TXUE)
1514 ++st->tx_underrun;
1515 if (isr & EMAC_ISR_RXOE)
1516 ++st->rx_fifo_overrun;
1517 if (isr & EMAC_ISR_OVR)
1518 ++st->rx_overrun;
1519 if (isr & EMAC_ISR_BP)
1520 ++st->rx_bad_packet;
1521 if (isr & EMAC_ISR_RP)
1522 ++st->rx_runt_packet;
1523 if (isr & EMAC_ISR_SE)
1524 ++st->rx_short_event;
1525 if (isr & EMAC_ISR_ALE)
1526 ++st->rx_alignment_error;
1527 if (isr & EMAC_ISR_BFCS)
1528 ++st->rx_bad_fcs;
1529 if (isr & EMAC_ISR_PTLE)
1530 ++st->rx_packet_too_long;
1531 if (isr & EMAC_ISR_ORE)
1532 ++st->rx_out_of_range;
1533 if (isr & EMAC_ISR_IRE)
1534 ++st->rx_in_range;
1535 if (isr & EMAC_ISR_SQE)
1536 ++st->tx_sqe;
1537 if (isr & EMAC_ISR_TE)
1538 ++st->tx_errors;
1416 1539
1417 /* Setup link parameters */ 1540 return IRQ_HANDLED;
1418 if (ep) { 1541}
1419 if (ep->autoneg == AUTONEG_ENABLE) {
1420 advertise = ep->advertising;
1421 autoneg = 1;
1422 } else {
1423 autoneg = 0;
1424 forced_speed = ep->speed;
1425 forced_duplex = ep->duplex;
1426 }
1427 }
1428 1542
1429 /* Configure PHY & start aneg */ 1543static struct net_device_stats *emac_stats(struct net_device *ndev)
1430 fep->want_autoneg = autoneg; 1544{
1431 if (autoneg) { 1545 struct ocp_enet_private *dev = ndev->priv;
1432 LINK_DEBUG(("%s: start link aneg, advertise: 0x%x\n", 1546 struct ibm_emac_stats *st = &dev->stats;
1433 fep->ndev->name, advertise)); 1547 struct ibm_emac_error_stats *est = &dev->estats;
1434 fep->phy_mii.def->ops->setup_aneg(&fep->phy_mii, advertise); 1548 struct net_device_stats *nst = &dev->nstats;
1435 } else { 1549
1436 LINK_DEBUG(("%s: start link forced, speed: %d, duplex: %d\n", 1550 DBG2("%d: stats" NL, dev->def->index);
1437 fep->ndev->name, forced_speed, forced_duplex)); 1551
1438 fep->phy_mii.def->ops->setup_forced(&fep->phy_mii, forced_speed, 1552 /* Compute "legacy" statistics */
1439 forced_duplex); 1553 local_irq_disable();
1440 } 1554 nst->rx_packets = (unsigned long)st->rx_packets;
1441 fep->timer_ticks = 0; 1555 nst->rx_bytes = (unsigned long)st->rx_bytes;
1442 mod_timer(&fep->link_timer, jiffies + HZ); 1556 nst->tx_packets = (unsigned long)st->tx_packets;
1557 nst->tx_bytes = (unsigned long)st->tx_bytes;
1558 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1559 est->rx_dropped_error +
1560 est->rx_dropped_resize +
1561 est->rx_dropped_mtu);
1562 nst->tx_dropped = (unsigned long)est->tx_dropped;
1563
1564 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1565 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1566 est->rx_fifo_overrun +
1567 est->rx_overrun);
1568 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1569 est->rx_alignment_error);
1570 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1571 est->rx_bad_fcs);
1572 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1573 est->rx_bd_short_event +
1574 est->rx_bd_packet_too_long +
1575 est->rx_bd_out_of_range +
1576 est->rx_bd_in_range +
1577 est->rx_runt_packet +
1578 est->rx_short_event +
1579 est->rx_packet_too_long +
1580 est->rx_out_of_range +
1581 est->rx_in_range);
1582
1583 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1584 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1585 est->tx_underrun);
1586 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1587 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1588 est->tx_bd_excessive_collisions +
1589 est->tx_bd_late_collision +
1590 est->tx_bd_multple_collisions);
1591 local_irq_enable();
1592 return nst;
1443} 1593}
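
[Editor's note, not part of the patch: emac_stats() keeps u64 counters internally and folds them into the unsigned long fields of struct net_device_stats under local_irq_disable(); on 32-bit PPC4xx "unsigned long" is 32 bits, so the legacy counters simply wrap while the full values remain available via ethtool. A minimal stand-alone illustration of that truncation:]

#include <stdio.h>

int main(void)
{
	unsigned long long hw = 0x100000002ULL;		/* 64-bit driver counter */
	unsigned long legacy = (unsigned long)hw;	/* what ifconfig sees */
	printf("%lu\n", legacy);			/* prints 2 on a 32-bit target */
	return 0;
}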
-
-static void emac_link_timer(unsigned long data)
-{
-	struct ocp_enet_private *fep = (struct ocp_enet_private *)data;
-	int link;
-
-	if (fep->going_away)
-		return;
-
-	spin_lock_irq(&fep->lock);
-
-	link = fep->phy_mii.def->ops->poll_link(&fep->phy_mii);
-	LINK_DEBUG(("%s: poll_link: %d\n", fep->ndev->name, link));
-
-	if (link == netif_carrier_ok(fep->ndev)) {
-		if (!link && fep->want_autoneg && (++fep->timer_ticks) > 10)
-			emac_start_link(fep, NULL);
-		goto out;
-	}
-	printk(KERN_INFO "%s: Link is %s\n", fep->ndev->name,
-	       link ? "Up" : "Down");
-	if (link) {
-		netif_carrier_on(fep->ndev);
-		/* Chip needs a full reset on config change. That sucks, so I
-		 * should ultimately move that to some tasklet to limit
-		 * latency peaks caused by this code
-		 */
-		emac_reset_configure(fep);
-		if (fep->opened)
-			emac_kick(fep);
-	} else {
-		fep->timer_ticks = 0;
-		netif_carrier_off(fep->ndev);
-	}
-      out:
-	mod_timer(&fep->link_timer, jiffies + HZ);
-	spin_unlock_irq(&fep->lock);
-}
+
+static void emac_remove(struct ocp_device *ocpdev)
+{
+	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
+
+	DBG("%d: remove" NL, dev->def->index);
+
+	ocp_set_drvdata(ocpdev, 0);
+	unregister_netdev(dev->ndev);
+
+	tah_fini(dev->tah_dev);
+	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
+	zmii_fini(dev->zmii_dev, dev->zmii_input);
+
+	emac_dbg_register(dev->def->index, 0);
+
+	mal_unregister_commac(dev->mal, &dev->commac);
+	iounmap((void *)dev->emacp);
+	kfree(dev->ndev);
+}
-
-static void emac_set_multicast_list(struct net_device *dev)
-{
-	struct ocp_enet_private *fep = dev->priv;
-
-	spin_lock_irq(&fep->lock);
-	__emac_set_multicast_list(dev);
-	spin_unlock_irq(&fep->lock);
-}
+
+static struct mal_commac_ops emac_commac_ops = {
+	.poll_tx = &emac_poll_tx,
+	.poll_rx = &emac_poll_rx,
+	.peek_rx = &emac_peek_rx,
+	.rxde = &emac_rxde,
+};
+
+static struct mal_commac_ops emac_commac_sg_ops = {
+	.poll_tx = &emac_poll_tx,
+	.poll_rx = &emac_poll_rx,
+	.peek_rx = &emac_peek_rx_sg,
+	.rxde = &emac_rxde,
+};
-
-static int emac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
-{
-	struct ocp_enet_private *fep = ndev->priv;
-
-	cmd->supported = fep->phy_mii.def->features;
-	cmd->port = PORT_MII;
-	cmd->transceiver = XCVR_EXTERNAL;
-	cmd->phy_address = fep->mii_phy_addr;
-	spin_lock_irq(&fep->lock);
-	cmd->autoneg = fep->want_autoneg;
-	cmd->speed = fep->phy_mii.speed;
-	cmd->duplex = fep->phy_mii.duplex;
-	spin_unlock_irq(&fep->lock);
-	return 0;
-}
+
+/* Ethtool support */
+static int emac_ethtool_get_settings(struct net_device *ndev,
+				     struct ethtool_cmd *cmd)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+
+	cmd->supported = dev->phy.features;
+	cmd->port = PORT_MII;
+	cmd->phy_address = dev->phy.address;
+	cmd->transceiver =
+	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
+
+	local_bh_disable();
+	cmd->advertising = dev->phy.advertising;
+	cmd->autoneg = dev->phy.autoneg;
+	cmd->speed = dev->phy.speed;
+	cmd->duplex = dev->phy.duplex;
+	local_bh_enable();
+
+	return 0;
+}
1507 1650
1508static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 1651static int emac_ethtool_set_settings(struct net_device *ndev,
1652 struct ethtool_cmd *cmd)
1509{ 1653{
1510 struct ocp_enet_private *fep = ndev->priv; 1654 struct ocp_enet_private *dev = ndev->priv;
1511 unsigned long features = fep->phy_mii.def->features; 1655 u32 f = dev->phy.features;
1512 1656
1513 if (!capable(CAP_NET_ADMIN)) 1657 DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1514 return -EPERM; 1658 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1515 1659
1660 /* Basic sanity checks */
1661 if (dev->phy.address < 0)
1662 return -EOPNOTSUPP;
1516 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) 1663 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1517 return -EINVAL; 1664 return -EINVAL;
1518 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) 1665 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1519 return -EINVAL; 1666 return -EINVAL;
1520 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) 1667 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1521 return -EINVAL; 1668 return -EINVAL;
1522 if (cmd->autoneg == AUTONEG_DISABLE) 1669
1670 if (cmd->autoneg == AUTONEG_DISABLE) {
1523 switch (cmd->speed) { 1671 switch (cmd->speed) {
1524 case SPEED_10: 1672 case SPEED_10:
1525 if (cmd->duplex == DUPLEX_HALF && 1673 if (cmd->duplex == DUPLEX_HALF
1526 (features & SUPPORTED_10baseT_Half) == 0) 1674 && !(f & SUPPORTED_10baseT_Half))
1527 return -EINVAL; 1675 return -EINVAL;
1528 if (cmd->duplex == DUPLEX_FULL && 1676 if (cmd->duplex == DUPLEX_FULL
1529 (features & SUPPORTED_10baseT_Full) == 0) 1677 && !(f & SUPPORTED_10baseT_Full))
1530 return -EINVAL; 1678 return -EINVAL;
1531 break; 1679 break;
1532 case SPEED_100: 1680 case SPEED_100:
1533 if (cmd->duplex == DUPLEX_HALF && 1681 if (cmd->duplex == DUPLEX_HALF
1534 (features & SUPPORTED_100baseT_Half) == 0) 1682 && !(f & SUPPORTED_100baseT_Half))
1535 return -EINVAL; 1683 return -EINVAL;
1536 if (cmd->duplex == DUPLEX_FULL && 1684 if (cmd->duplex == DUPLEX_FULL
1537 (features & SUPPORTED_100baseT_Full) == 0) 1685 && !(f & SUPPORTED_100baseT_Full))
1538 return -EINVAL; 1686 return -EINVAL;
1539 break; 1687 break;
1540 case SPEED_1000: 1688 case SPEED_1000:
1541 if (cmd->duplex == DUPLEX_HALF && 1689 if (cmd->duplex == DUPLEX_HALF
1542 (features & SUPPORTED_1000baseT_Half) == 0) 1690 && !(f & SUPPORTED_1000baseT_Half))
1543 return -EINVAL; 1691 return -EINVAL;
1544 if (cmd->duplex == DUPLEX_FULL && 1692 if (cmd->duplex == DUPLEX_FULL
1545 (features & SUPPORTED_1000baseT_Full) == 0) 1693 && !(f & SUPPORTED_1000baseT_Full))
1546 return -EINVAL; 1694 return -EINVAL;
1547 break; 1695 break;
1548 default: 1696 default:
1549 return -EINVAL; 1697 return -EINVAL;
1550 } else if ((features & SUPPORTED_Autoneg) == 0) 1698 }
1551 return -EINVAL; 1699
1552 spin_lock_irq(&fep->lock); 1700 local_bh_disable();
1553 emac_start_link(fep, cmd); 1701 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1554 spin_unlock_irq(&fep->lock); 1702 cmd->duplex);
1703
1704 } else {
1705 if (!(f & SUPPORTED_Autoneg))
1706 return -EINVAL;
1707
1708 local_bh_disable();
1709 dev->phy.def->ops->setup_aneg(&dev->phy,
1710 (cmd->advertising & f) |
1711 (dev->phy.advertising &
1712 (ADVERTISED_Pause |
1713 ADVERTISED_Asym_Pause)));
1714 }
1715 emac_force_link_update(dev);
1716 local_bh_enable();
1717
1555 return 0; 1718 return 0;
1556} 1719}
-
-static void
-emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
-{
-	struct ocp_enet_private *fep = ndev->priv;
-
-	strcpy(info->driver, DRV_NAME);
-	strcpy(info->version, DRV_VERSION);
-	info->fw_version[0] = '\0';
-	sprintf(info->bus_info, "IBM EMAC %d", fep->ocpdev->def->index);
-	info->regdump_len = 0;
-}
-
-static int emac_nway_reset(struct net_device *ndev)
-{
-	struct ocp_enet_private *fep = ndev->priv;
-
-	if (!fep->want_autoneg)
-		return -EINVAL;
-	spin_lock_irq(&fep->lock);
-	emac_start_link(fep, NULL);
-	spin_unlock_irq(&fep->lock);
-	return 0;
-}
+
+static void emac_ethtool_get_ringparam(struct net_device *ndev,
+				       struct ethtool_ringparam *rp)
+{
+	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
+	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
+}
+
+static void emac_ethtool_get_pauseparam(struct net_device *ndev,
+					struct ethtool_pauseparam *pp)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+
+	local_bh_disable();
+	if ((dev->phy.features & SUPPORTED_Autoneg) &&
+	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
+		pp->autoneg = 1;
+
+	if (dev->phy.duplex == DUPLEX_FULL) {
+		if (dev->phy.pause)
+			pp->rx_pause = pp->tx_pause = 1;
+		else if (dev->phy.asym_pause)
+			pp->tx_pause = 1;
+	}
+	local_bh_enable();
+}
+
+static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
+{
+	struct ocp_enet_private *dev = ndev->priv;
+	return dev->tah_dev != 0;
+}
1581 1752
1582static u32 emac_get_link(struct net_device *ndev) 1753static int emac_get_regs_len(struct ocp_enet_private *dev)
1583{ 1754{
1584 return netif_carrier_ok(ndev); 1755 return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1585} 1756}
1586 1757
1587static struct ethtool_ops emac_ethtool_ops = { 1758static int emac_ethtool_get_regs_len(struct net_device *ndev)
1588 .get_settings = emac_get_settings, 1759{
1589 .set_settings = emac_set_settings, 1760 struct ocp_enet_private *dev = ndev->priv;
1590 .get_drvinfo = emac_get_drvinfo, 1761 return sizeof(struct emac_ethtool_regs_hdr) +
1591 .nway_reset = emac_nway_reset, 1762 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1592 .get_link = emac_get_link 1763 zmii_get_regs_len(dev->zmii_dev) +
1593}; 1764 rgmii_get_regs_len(dev->rgmii_dev) +
1765 tah_get_regs_len(dev->tah_dev);
1766}
1594 1767
1595static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1768static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1596{ 1769{
1597 struct ocp_enet_private *fep = dev->priv; 1770 struct emac_ethtool_regs_subhdr *hdr = buf;
1598 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1599 1771
1600 switch (cmd) { 1772 hdr->version = EMAC_ETHTOOL_REGS_VER;
1601 case SIOCGMIIPHY: 1773 hdr->index = dev->def->index;
1602 data[0] = fep->mii_phy_addr; 1774 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1603 /* Fall through */ 1775 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1604 case SIOCGMIIREG: 1776}
1605 data[3] = emac_phy_read(dev, fep->mii_phy_addr, data[1]);
1606 return 0;
1607 case SIOCSMIIREG:
1608 if (!capable(CAP_NET_ADMIN))
1609 return -EPERM;
1610 1777
1611 emac_phy_write(dev, fep->mii_phy_addr, data[1], data[2]); 1778static void emac_ethtool_get_regs(struct net_device *ndev,
1612 return 0; 1779 struct ethtool_regs *regs, void *buf)
1613 default: 1780{
1614 return -EOPNOTSUPP; 1781 struct ocp_enet_private *dev = ndev->priv;
1782 struct emac_ethtool_regs_hdr *hdr = buf;
1783
1784 hdr->components = 0;
1785 buf = hdr + 1;
1786
1787 local_irq_disable();
1788 buf = mal_dump_regs(dev->mal, buf);
1789 buf = emac_dump_regs(dev, buf);
1790 if (dev->zmii_dev) {
1791 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
1792 buf = zmii_dump_regs(dev->zmii_dev, buf);
1615 } 1793 }
1794 if (dev->rgmii_dev) {
1795 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
1796 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
1797 }
1798 if (dev->tah_dev) {
1799 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
1800 buf = tah_dump_regs(dev->tah_dev, buf);
1801 }
1802 local_irq_enable();
1616} 1803}
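
[Editor's sketch, not part of the patch: the register blob that emac_ethtool_get_regs() assembles above can be fetched from user space with the standard ETHTOOL_GREGS ioctl. The blob starts with emac_ethtool_regs_hdr (the `components` bitmask), followed by the MAL/EMAC and optional ZMII/RGMII/TAH sections described in ibm_emac_core.h. The interface name and the 4096-byte buffer are assumptions; a real tool would query regdump_len via ETHTOOL_GDRVINFO first. Error handling is trimmed.]

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	unsigned char buf[sizeof(struct ethtool_regs) + 4096];
	struct ethtool_regs *regs = (struct ethtool_regs *)buf;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* device name assumed */

	regs->cmd = ETHTOOL_GREGS;
	regs->len = 4096;				/* buffer size assumed */
	ifr.ifr_data = (char *)regs;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("got %u bytes of registers\n", regs->len);
	close(fd);
	return 0;
}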
1617 1804
1618static int emac_open(struct net_device *dev) 1805static int emac_ethtool_nway_reset(struct net_device *ndev)
1619{ 1806{
1620 struct ocp_enet_private *fep = dev->priv; 1807 struct ocp_enet_private *dev = ndev->priv;
1621 int rc; 1808 int res = 0;
1622 1809
1623 spin_lock_irq(&fep->lock); 1810 DBG("%d: nway_reset" NL, dev->def->index);
1624 1811
1625 fep->opened = 1; 1812 if (dev->phy.address < 0)
1626 netif_carrier_off(dev); 1813 return -EOPNOTSUPP;
1627 1814
1628 /* Reset & configure the chip */ 1815 local_bh_disable();
1629 emac_reset_configure(fep); 1816 if (!dev->phy.autoneg) {
1817 res = -EINVAL;
1818 goto out;
1819 }
1630 1820
1631 spin_unlock_irq(&fep->lock); 1821 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1822 emac_force_link_update(dev);
1632 1823
1633 /* Request our interrupt lines */ 1824 out:
1634 rc = request_irq(dev->irq, emac_mac_irq, 0, "IBM EMAC MAC", dev); 1825 local_bh_enable();
1635 if (rc != 0) { 1826 return res;
1636 printk("dev->irq %d failed\n", dev->irq); 1827}
1637 goto bail;
1638 }
1639 /* Kick the chip rx & tx channels into life */
1640 spin_lock_irq(&fep->lock);
1641 emac_kick(fep);
1642 spin_unlock_irq(&fep->lock);
1643 1828
1644 netif_start_queue(dev); 1829static int emac_ethtool_get_stats_count(struct net_device *ndev)
1645 bail: 1830{
1646 return rc; 1831 return EMAC_ETHTOOL_STATS_COUNT;
1647} 1832}
1648 1833
1649static int emac_close(struct net_device *dev) 1834static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1835 u8 * buf)
1650{ 1836{
1651 struct ocp_enet_private *fep = dev->priv; 1837 if (stringset == ETH_SS_STATS)
1652 emac_t *emacp = fep->emacp; 1838 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1839}
1653 1840
1654 /* XXX Stop IRQ emitting here */ 1841static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
1655 spin_lock_irq(&fep->lock); 1842 struct ethtool_stats *estats,
1656 fep->opened = 0; 1843 u64 * tmp_stats)
1657 mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask); 1844{
1658 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 1845 struct ocp_enet_private *dev = ndev->priv;
1659 netif_carrier_off(dev); 1846 local_irq_disable();
1660 netif_stop_queue(dev); 1847 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
1848 tmp_stats += sizeof(dev->stats) / sizeof(u64);
1849 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
1850 local_irq_enable();
1851}
1661 1852
1662 /* 1853static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1663 * Check for a link, some PHYs don't provide a clock if 1854 struct ethtool_drvinfo *info)
1664 * no link is present. Some EMACs will not come out of 1855{
1665 * soft reset without a PHY clock present. 1856 struct ocp_enet_private *dev = ndev->priv;
1666 */
1667 if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
1668 out_be32(&emacp->em0mr0, EMAC_M0_SRST);
1669 udelay(10);
1670 1857
1671 if (emacp->em0mr0 & EMAC_M0_SRST) { 1858 strcpy(info->driver, "ibm_emac");
 1672 /* not sure what to do here; hopefully it clears before another open */ 1859 strcpy(info->version, DRV_VERSION);
1673 printk(KERN_ERR 1860 info->fw_version[0] = '\0';
1674 "%s: Phy SoftReset didn't clear, no link?\n", 1861 sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1675 dev->name); 1862 info->n_stats = emac_ethtool_get_stats_count(ndev);
1676 } 1863 info->regdump_len = emac_ethtool_get_regs_len(ndev);
1677 } 1864}
1678 1865
1679 /* Free the irq's */ 1866static struct ethtool_ops emac_ethtool_ops = {
1680 free_irq(dev->irq, dev); 1867 .get_settings = emac_ethtool_get_settings,
1868 .set_settings = emac_ethtool_set_settings,
1869 .get_drvinfo = emac_ethtool_get_drvinfo,
1681 1870
1682 spin_unlock_irq(&fep->lock); 1871 .get_regs_len = emac_ethtool_get_regs_len,
1872 .get_regs = emac_ethtool_get_regs,
1683 1873
1684 return 0; 1874 .nway_reset = emac_ethtool_nway_reset,
1685}
1686 1875
1687static void emac_remove(struct ocp_device *ocpdev) 1876 .get_ringparam = emac_ethtool_get_ringparam,
1688{ 1877 .get_pauseparam = emac_ethtool_get_pauseparam,
1689 struct net_device *dev = ocp_get_drvdata(ocpdev); 1878
1690 struct ocp_enet_private *ep = dev->priv; 1879 .get_rx_csum = emac_ethtool_get_rx_csum,
1691 1880
1692 /* FIXME: locking, races, ... */ 1881 .get_strings = emac_ethtool_get_strings,
1693 ep->going_away = 1; 1882 .get_stats_count = emac_ethtool_get_stats_count,
1694 ocp_set_drvdata(ocpdev, NULL); 1883 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
1695 if (ep->rgmii_dev) 1884
1696 emac_close_rgmii(ep->rgmii_dev); 1885 .get_link = ethtool_op_get_link,
1697 if (ep->zmii_dev) 1886 .get_tx_csum = ethtool_op_get_tx_csum,
1698 emac_close_zmii(ep->zmii_dev); 1887 .get_sg = ethtool_op_get_sg,
1699
1700 unregister_netdev(dev);
1701 del_timer_sync(&ep->link_timer);
1702 mal_unregister_commac(ep->mal, &ep->commac);
1703 iounmap((void *)ep->emacp);
1704 kfree(dev);
1705}
1706
1707struct mal_commac_ops emac_commac_ops = {
1708 .txeob = &emac_txeob_dev,
1709 .txde = &emac_txde_dev,
1710 .rxeob = &emac_rxeob_dev,
1711 .rxde = &emac_rxde_dev,
1712}; 1888};
1713 1889
1714#ifdef CONFIG_NET_POLL_CONTROLLER 1890static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1715static void emac_netpoll(struct net_device *ndev)
1716{ 1891{
1717 emac_rxeob_dev((void *)ndev, 0); 1892 struct ocp_enet_private *dev = ndev->priv;
1718 emac_txeob_dev((void *)ndev, 0); 1893 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1894
1895 DBG("%d: ioctl %08x" NL, dev->def->index, cmd);
1896
1897 if (dev->phy.address < 0)
1898 return -EOPNOTSUPP;
1899
1900 switch (cmd) {
1901 case SIOCGMIIPHY:
1902 case SIOCDEVPRIVATE:
1903 data[0] = dev->phy.address;
1904 /* Fall through */
1905 case SIOCGMIIREG:
1906 case SIOCDEVPRIVATE + 1:
1907 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
1908 return 0;
1909
1910 case SIOCSMIIREG:
1911 case SIOCDEVPRIVATE + 2:
1912 if (!capable(CAP_NET_ADMIN))
1913 return -EPERM;
1914 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
1915 return 0;
1916 default:
1917 return -EOPNOTSUPP;
1918 }
1719} 1919}
1720#endif
1721 1920
1722static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal) 1921static int __init emac_probe(struct ocp_device *ocpdev)
1723{ 1922{
1724 int deferred_init = 0; 1923 struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1725 int rc = 0, i;
1726 struct net_device *ndev; 1924 struct net_device *ndev;
1727 struct ocp_enet_private *ep; 1925 struct ocp_device *maldev;
1728 struct ocp_func_emac_data *emacdata; 1926 struct ocp_enet_private *dev;
1729 int commac_reg = 0; 1927 int err, i;
1730 u32 phy_map; 1928
1929 DBG("%d: probe" NL, ocpdev->def->index);
1731 1930
1732 emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
1733 if (!emacdata) { 1931 if (!emacdata) {
1734 printk(KERN_ERR "emac%d: Missing additional data!\n", 1932 printk(KERN_ERR "emac%d: Missing additional data!\n",
1735 ocpdev->def->index); 1933 ocpdev->def->index);
@@ -1738,304 +1936,312 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
1738 1936
1739 /* Allocate our net_device structure */ 1937 /* Allocate our net_device structure */
1740 ndev = alloc_etherdev(sizeof(struct ocp_enet_private)); 1938 ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1741 if (ndev == NULL) { 1939 if (!ndev) {
1742 printk(KERN_ERR 1940 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1743 "emac%d: Could not allocate ethernet device.\n",
1744 ocpdev->def->index); 1941 ocpdev->def->index);
1745 return -ENOMEM; 1942 return -ENOMEM;
1746 } 1943 }
1747 ep = ndev->priv; 1944 dev = ndev->priv;
1748 ep->ndev = ndev; 1945 dev->ndev = ndev;
1749 ep->ocpdev = ocpdev; 1946 dev->ldev = &ocpdev->dev;
1750 ndev->irq = ocpdev->def->irq; 1947 dev->def = ocpdev->def;
1751 ep->wol_irq = emacdata->wol_irq; 1948 SET_MODULE_OWNER(ndev);
1752 if (emacdata->mdio_idx >= 0) {
1753 if (emacdata->mdio_idx == ocpdev->def->index) {
1754 /* Set the common MDIO net_device */
1755 mdio_ndev = ndev;
1756 deferred_init = 1;
1757 }
1758 ep->mdio_dev = mdio_ndev;
1759 } else {
1760 ep->mdio_dev = ndev;
1761 }
1762 1949
1763 ocp_set_drvdata(ocpdev, ndev); 1950 /* Find MAL device we are connected to */
1764 1951 maldev =
1765 spin_lock_init(&ep->lock); 1952 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1766 1953 if (!maldev) {
1767 /* Fill out MAL informations and register commac */ 1954 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1768 ep->mal = mal; 1955 dev->def->index, emacdata->mal_idx);
1769 ep->mal_tx_chan = emacdata->mal_tx_chan; 1956 err = -ENODEV;
1770 ep->mal_rx_chan = emacdata->mal_rx_chan; 1957 goto out;
1771 ep->commac.ops = &emac_commac_ops; 1958 }
1772 ep->commac.dev = ndev; 1959 dev->mal = ocp_get_drvdata(maldev);
1773 ep->commac.tx_chan_mask = MAL_CHAN_MASK(ep->mal_tx_chan); 1960 if (!dev->mal) {
1774 ep->commac.rx_chan_mask = MAL_CHAN_MASK(ep->mal_rx_chan); 1961 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1775 rc = mal_register_commac(ep->mal, &ep->commac); 1962 dev->def->index, emacdata->mal_idx);
1776 if (rc != 0) 1963 err = -ENODEV;
1777 goto bail; 1964 goto out;
1778 commac_reg = 1;
1779
1780 /* Map our MMIOs */
1781 ep->emacp = (emac_t *) ioremap(ocpdev->def->paddr, sizeof(emac_t));
1782
1783 /* Check if we need to attach to a ZMII */
1784 if (emacdata->zmii_idx >= 0) {
1785 ep->zmii_input = emacdata->zmii_mux;
1786 ep->zmii_dev =
1787 ocp_find_device(OCP_ANY_ID, OCP_FUNC_ZMII,
1788 emacdata->zmii_idx);
1789 if (ep->zmii_dev == NULL)
1790 printk(KERN_WARNING
1791 "emac%d: ZMII %d requested but not found !\n",
1792 ocpdev->def->index, emacdata->zmii_idx);
1793 else if ((rc =
1794 emac_init_zmii(ep->zmii_dev, ep->zmii_input,
1795 emacdata->phy_mode)) != 0)
1796 goto bail;
1797 } 1965 }
1798 1966
1799 /* Check if we need to attach to a RGMII */ 1967 /* Register with MAL */
1800 if (emacdata->rgmii_idx >= 0) { 1968 dev->commac.ops = &emac_commac_ops;
1801 ep->rgmii_input = emacdata->rgmii_mux; 1969 dev->commac.dev = dev;
1802 ep->rgmii_dev = 1970 dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1803 ocp_find_device(OCP_ANY_ID, OCP_FUNC_RGMII, 1971 dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1804 emacdata->rgmii_idx); 1972 err = mal_register_commac(dev->mal, &dev->commac);
1805 if (ep->rgmii_dev == NULL) 1973 if (err) {
1806 printk(KERN_WARNING 1974 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1807 "emac%d: RGMII %d requested but not found !\n", 1975 dev->def->index, emacdata->mal_idx);
1808 ocpdev->def->index, emacdata->rgmii_idx); 1976 goto out;
1809 else if ((rc = 1977 }
1810 emac_init_rgmii(ep->rgmii_dev, ep->rgmii_input, 1978 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1811 emacdata->phy_mode)) != 0) 1979 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
1812 goto bail; 1980
1981 /* Get pointers to BD rings */
1982 dev->tx_desc =
1983 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
1984 emacdata->mal_tx_chan);
1985 dev->rx_desc =
1986 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
1987 emacdata->mal_rx_chan);
1988
1989 DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
1990 DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
1991
1992 /* Clean rings */
1993 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
1994 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
1995
1996 /* If we depend on another EMAC for MDIO, check whether it was probed already */
1997 if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
1998 struct ocp_device *mdiodev =
1999 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2000 emacdata->mdio_idx);
2001 if (!mdiodev) {
2002 printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2003 dev->def->index, emacdata->mdio_idx);
2004 err = -ENODEV;
2005 goto out2;
2006 }
2007 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2008 if (!dev->mdio_dev) {
2009 printk(KERN_ERR
2010 "emac%d: emac%d hasn't been initialized yet!\n",
2011 dev->def->index, emacdata->mdio_idx);
2012 err = -ENODEV;
2013 goto out2;
2014 }
1813 } 2015 }
1814 2016
1815 /* Check if we need to attach to a TAH */ 2017 /* Attach to ZMII, if needed */
1816 if (emacdata->tah_idx >= 0) { 2018 if ((err = zmii_attach(dev)) != 0)
1817 ep->tah_dev = 2019 goto out2;
1818 ocp_find_device(OCP_ANY_ID, OCP_FUNC_TAH, 2020
1819 emacdata->tah_idx); 2021 /* Attach to RGMII, if needed */
1820 if (ep->tah_dev == NULL) 2022 if ((err = rgmii_attach(dev)) != 0)
1821 printk(KERN_WARNING 2023 goto out3;
1822 "emac%d: TAH %d requested but not found !\n", 2024
1823 ocpdev->def->index, emacdata->tah_idx); 2025 /* Attach to TAH, if needed */
1824 else if ((rc = emac_init_tah(ep)) != 0) 2026 if ((err = tah_attach(dev)) != 0)
1825 goto bail; 2027 goto out4;
2028
2029 /* Map EMAC regs */
2030 dev->emacp =
2031 (struct emac_regs *)ioremap(dev->def->paddr,
2032 sizeof(struct emac_regs));
2033 if (!dev->emacp) {
2034 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2035 dev->def->index);
2036 err = -ENOMEM;
2037 goto out5;
1826 } 2038 }
1827 2039
1828 if (deferred_init) { 2040 /* Fill in MAC address */
1829 if (!list_empty(&emac_init_list)) { 2041 for (i = 0; i < 6; ++i)
1830 struct list_head *entry; 2042 ndev->dev_addr[i] = emacdata->mac_addr[i];
1831 struct emac_def_dev *ddev;
1832 2043
1833 list_for_each(entry, &emac_init_list) { 2044 /* Set some link defaults before we can find out real parameters */
1834 ddev = 2045 dev->phy.speed = SPEED_100;
1835 list_entry(entry, struct emac_def_dev, 2046 dev->phy.duplex = DUPLEX_FULL;
1836 link); 2047 dev->phy.autoneg = AUTONEG_DISABLE;
1837 emac_init_device(ddev->ocpdev, ddev->mal); 2048 dev->phy.pause = dev->phy.asym_pause = 0;
1838 } 2049 init_timer(&dev->link_timer);
2050 dev->link_timer.function = emac_link_timer;
2051 dev->link_timer.data = (unsigned long)dev;
2052
2053 /* Find PHY if any */
2054 dev->phy.dev = ndev;
2055 dev->phy.mode = emacdata->phy_mode;
2056 if (emacdata->phy_map != 0xffffffff) {
2057 u32 phy_map = emacdata->phy_map | busy_phy_map;
2058 u32 adv;
2059
2060 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2061 emacdata->phy_map, busy_phy_map);
2062
2063 EMAC_RX_CLK_TX(dev->def->index);
2064
2065 dev->phy.mdio_read = emac_mdio_read;
2066 dev->phy.mdio_write = emac_mdio_write;
2067
2068 /* Configure EMAC with defaults so we can at least use MDIO
2069 * This is needed mostly for 440GX
2070 */
2071 if (emac_phy_gpcs(dev->phy.mode)) {
2072 /* XXX
2073 * Make GPCS PHY address equal to EMAC index.
2074 * We probably should take into account busy_phy_map
2075 * and/or phy_map here.
2076 */
2077 dev->phy.address = dev->def->index;
1839 } 2078 }
1840 } 2079
2080 emac_configure(dev);
1841 2081
1842 /* Init link monitoring timer */ 2082 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
1843 init_timer(&ep->link_timer); 2083 if (!(phy_map & 1)) {
1844 ep->link_timer.function = emac_link_timer; 2084 int r;
1845 ep->link_timer.data = (unsigned long)ep; 2085 busy_phy_map |= 1 << i;
1846 ep->timer_ticks = 0; 2086
1847 2087 /* Quick check if there is a PHY at the address */
1848 /* Fill up the mii_phy structure */ 2088 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
1849 ep->phy_mii.dev = ndev; 2089 if (r == 0xffff || r < 0)
1850 ep->phy_mii.mdio_read = emac_phy_read; 2090 continue;
1851 ep->phy_mii.mdio_write = emac_phy_write; 2091 if (!mii_phy_probe(&dev->phy, i))
1852 ep->phy_mii.mode = emacdata->phy_mode; 2092 break;
1853 2093 }
1854 /* Find PHY */ 2094 if (i == 0x20) {
1855 phy_map = emacdata->phy_map | busy_phy_map; 2095 printk(KERN_WARNING "emac%d: can't find PHY!\n",
1856 for (i = 0; i <= 0x1f; i++, phy_map >>= 1) { 2096 dev->def->index);
1857 if ((phy_map & 0x1) == 0) { 2097 goto out6;
1858 int val = emac_phy_read(ndev, i, MII_BMCR);
1859 if (val != 0xffff && val != -1)
1860 break;
1861 } 2098 }
1862 }
1863 if (i == 0x20) {
1864 printk(KERN_WARNING "emac%d: Can't find PHY.\n",
1865 ocpdev->def->index);
1866 rc = -ENODEV;
1867 goto bail;
1868 }
1869 busy_phy_map |= 1 << i;
1870 ep->mii_phy_addr = i;
1871 rc = mii_phy_probe(&ep->phy_mii, i);
1872 if (rc) {
1873 printk(KERN_WARNING "emac%d: Failed to probe PHY type.\n",
1874 ocpdev->def->index);
1875 rc = -ENODEV;
1876 goto bail;
1877 }
1878
1879 /* Disable any PHY features not supported by the platform */
1880 ep->phy_mii.def->features &= ~emacdata->phy_feat_exc;
1881 2099
1882 /* Setup initial PHY config & startup aneg */ 2100 /* Init PHY */
1883 if (ep->phy_mii.def->ops->init) 2101 if (dev->phy.def->ops->init)
1884 ep->phy_mii.def->ops->init(&ep->phy_mii); 2102 dev->phy.def->ops->init(&dev->phy);
1885 netif_carrier_off(ndev);
1886 if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
1887 ep->want_autoneg = 1;
1888 else {
1889 ep->want_autoneg = 0;
1890 2103
1891 /* Select highest supported speed/duplex */ 2104 /* Disable any PHY features not supported by the platform */
1892 if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) { 2105 dev->phy.def->features &= ~emacdata->phy_feat_exc;
1893 ep->phy_mii.speed = SPEED_1000; 2106
1894 ep->phy_mii.duplex = DUPLEX_FULL; 2107 /* Setup initial link parameters */
1895 } else if (ep->phy_mii.def->features & 2108 if (dev->phy.features & SUPPORTED_Autoneg) {
1896 SUPPORTED_1000baseT_Half) { 2109 adv = dev->phy.features;
1897 ep->phy_mii.speed = SPEED_1000; 2110#if !defined(CONFIG_40x)
1898 ep->phy_mii.duplex = DUPLEX_HALF; 2111 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1899 } else if (ep->phy_mii.def->features & 2112#endif
1900 SUPPORTED_100baseT_Full) { 2113 /* Restart autonegotiation */
1901 ep->phy_mii.speed = SPEED_100; 2114 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
1902 ep->phy_mii.duplex = DUPLEX_FULL;
1903 } else if (ep->phy_mii.def->features &
1904 SUPPORTED_100baseT_Half) {
1905 ep->phy_mii.speed = SPEED_100;
1906 ep->phy_mii.duplex = DUPLEX_HALF;
1907 } else if (ep->phy_mii.def->features &
1908 SUPPORTED_10baseT_Full) {
1909 ep->phy_mii.speed = SPEED_10;
1910 ep->phy_mii.duplex = DUPLEX_FULL;
1911 } else { 2115 } else {
1912 ep->phy_mii.speed = SPEED_10; 2116 u32 f = dev->phy.def->features;
1913 ep->phy_mii.duplex = DUPLEX_HALF; 2117 int speed = SPEED_10, fd = DUPLEX_HALF;
2118
2119 /* Select highest supported speed/duplex */
2120 if (f & SUPPORTED_1000baseT_Full) {
2121 speed = SPEED_1000;
2122 fd = DUPLEX_FULL;
2123 } else if (f & SUPPORTED_1000baseT_Half)
2124 speed = SPEED_1000;
2125 else if (f & SUPPORTED_100baseT_Full) {
2126 speed = SPEED_100;
2127 fd = DUPLEX_FULL;
2128 } else if (f & SUPPORTED_100baseT_Half)
2129 speed = SPEED_100;
2130 else if (f & SUPPORTED_10baseT_Full)
2131 fd = DUPLEX_FULL;
2132
2133 /* Force link parameters */
2134 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
1914 } 2135 }
1915 } 2136 } else {
1916 emac_start_link(ep, NULL); 2137 emac_reset(dev);
1917 2138
1918 /* read the MAC Address */ 2139 /* PHY-less configuration.
1919 for (i = 0; i < 6; i++) 2140 * XXX I probably should move these settings to emacdata
1920 ndev->dev_addr[i] = emacdata->mac_addr[i]; 2141 */
2142 dev->phy.address = -1;
2143 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2144 dev->phy.pause = 1;
2145 }
1921 2146
1922 /* Fill in the driver function table */ 2147 /* Fill in the driver function table */
1923 ndev->open = &emac_open; 2148 ndev->open = &emac_open;
1924 ndev->hard_start_xmit = &emac_start_xmit; 2149 if (dev->tah_dev) {
2150 ndev->hard_start_xmit = &emac_start_xmit_sg;
2151 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2152 } else
2153 ndev->hard_start_xmit = &emac_start_xmit;
2154 ndev->tx_timeout = &emac_full_tx_reset;
2155 ndev->watchdog_timeo = 5 * HZ;
1925 ndev->stop = &emac_close; 2156 ndev->stop = &emac_close;
1926 ndev->get_stats = &emac_stats; 2157 ndev->get_stats = &emac_stats;
1927 if (emacdata->jumbo)
1928 ndev->change_mtu = &emac_change_mtu;
1929 ndev->set_mac_address = &emac_set_mac_address;
1930 ndev->set_multicast_list = &emac_set_multicast_list; 2158 ndev->set_multicast_list = &emac_set_multicast_list;
1931 ndev->do_ioctl = &emac_ioctl; 2159 ndev->do_ioctl = &emac_ioctl;
2160 if (emac_phy_supports_gige(emacdata->phy_mode)) {
2161 ndev->change_mtu = &emac_change_mtu;
2162 dev->commac.ops = &emac_commac_sg_ops;
2163 }
1932 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); 2164 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
1933 if (emacdata->tah_idx >= 0)
1934 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
1935#ifdef CONFIG_NET_POLL_CONTROLLER
1936 ndev->poll_controller = emac_netpoll;
1937#endif
1938 2165
1939 SET_MODULE_OWNER(ndev); 2166 netif_carrier_off(ndev);
2167 netif_stop_queue(ndev);
2168
2169 err = register_netdev(ndev);
2170 if (err) {
2171 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2172 dev->def->index, err);
2173 goto out6;
2174 }
1940 2175
1941 rc = register_netdev(ndev); 2176 ocp_set_drvdata(ocpdev, dev);
1942 if (rc != 0)
1943 goto bail;
1944 2177
1945 printk("%s: IBM emac, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", 2178 printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
1946 ndev->name, 2179 ndev->name, dev->def->index,
1947 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], 2180 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
1948 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); 2181 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
1949 printk(KERN_INFO "%s: Found %s PHY (0x%02x)\n",
1950 ndev->name, ep->phy_mii.def->name, ep->mii_phy_addr);
1951 2182
1952 bail: 2183 if (dev->phy.address >= 0)
1953 if (rc && commac_reg) 2184 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
1954 mal_unregister_commac(ep->mal, &ep->commac); 2185 dev->phy.def->name, dev->phy.address);
1955 if (rc && ndev)
1956 kfree(ndev);
1957 2186
1958 return rc; 2187 emac_dbg_register(dev->def->index, dev);
1959}
1960
1961static int emac_probe(struct ocp_device *ocpdev)
1962{
1963 struct ocp_device *maldev;
1964 struct ibm_ocp_mal *mal;
1965 struct ocp_func_emac_data *emacdata;
1966
1967 emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
1968 if (emacdata == NULL) {
 1969 printk(KERN_ERR "emac%d: Missing additional data!\n",
1970 ocpdev->def->index);
1971 return -ENODEV;
1972 }
1973
1974 /* Get the MAL device */
1975 maldev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_MAL, emacdata->mal_idx);
1976 if (maldev == NULL) {
1977 printk("No maldev\n");
1978 return -ENODEV;
1979 }
1980 /*
1981 * Get MAL driver data, it must be here due to link order.
1982 * When the driver is modularized, symbol dependencies will
1983 * ensure the MAL driver is already present if built as a
1984 * module.
1985 */
1986 mal = (struct ibm_ocp_mal *)ocp_get_drvdata(maldev);
1987 if (mal == NULL) {
1988 printk("No maldrv\n");
1989 return -ENODEV;
1990 }
1991
1992 /* If we depend on another EMAC for MDIO, wait for it to show up */
1993 if (emacdata->mdio_idx >= 0 &&
1994 (emacdata->mdio_idx != ocpdev->def->index) && !mdio_ndev) {
1995 struct emac_def_dev *ddev;
1996 /* Add this index to the deferred init table */
1997 ddev = kmalloc(sizeof(struct emac_def_dev), GFP_KERNEL);
1998 ddev->ocpdev = ocpdev;
1999 ddev->mal = mal;
2000 list_add_tail(&ddev->link, &emac_init_list);
2001 } else {
2002 emac_init_device(ocpdev, mal);
2003 }
2004 2188
2005 return 0; 2189 return 0;
2190 out6:
2191 iounmap((void *)dev->emacp);
2192 out5:
2193 tah_fini(dev->tah_dev);
2194 out4:
2195 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2196 out3:
2197 zmii_fini(dev->zmii_dev, dev->zmii_input);
2198 out2:
2199 mal_unregister_commac(dev->mal, &dev->commac);
2200 out:
2201 kfree(ndev);
2202 return err;
2006} 2203}
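
[Editor's sketch, not part of the patch: emac_probe() above uses the kernel's goto-based error-unwinding idiom — each `out*` label undoes exactly the setup steps that succeeded before the failure point, in reverse order. Reduced to a shape that compiles stand-alone; `acquire`/`release` are hypothetical stand-ins for the mal/zmii/rgmii/tah setup and teardown calls.]

#include <stdlib.h>

struct res { int dummy; };
static struct res *acquire(void) { return malloc(sizeof(struct res)); }
static void release(struct res *r) { free(r); }

static int probe(void)
{
	struct res *a, *b;

	a = acquire();
	if (!a)
		goto out;
	b = acquire();
	if (!b)
		goto out_free_a;

	/* success: in a real driver both resources stay held until the
	 * matching remove(); freed here only to keep the demo leak-free */
	release(b);
	release(a);
	return 0;

 out_free_a:
	release(a);	/* undo only what succeeded */
 out:
	return -1;
}

int main(void) { return probe() ? 1 : 0; }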
2007 2204
2008/* Structure for a device driver */
2009static struct ocp_device_id emac_ids[] = { 2205static struct ocp_device_id emac_ids[] = {
2010 {.vendor = OCP_ANY_ID,.function = OCP_FUNC_EMAC}, 2206 { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
2011 {.vendor = OCP_VENDOR_INVALID} 2207 { .vendor = OCP_VENDOR_INVALID}
2012}; 2208};
2013 2209
2014static struct ocp_driver emac_driver = { 2210static struct ocp_driver emac_driver = {
2015 .name = "emac", 2211 .name = "emac",
2016 .id_table = emac_ids, 2212 .id_table = emac_ids,
2017
2018 .probe = emac_probe, 2213 .probe = emac_probe,
2019 .remove = emac_remove, 2214 .remove = emac_remove,
2020}; 2215};
2021 2216
2022static int __init emac_init(void) 2217static int __init emac_init(void)
2023{ 2218{
2024 printk(KERN_INFO DRV_NAME ": " DRV_DESC ", version " DRV_VERSION "\n"); 2219 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2025 printk(KERN_INFO "Maintained by " DRV_AUTHOR "\n"); 2220
2221 DBG(": init" NL);
2026 2222
2027 if (skb_res > 2) { 2223 if (mal_init())
2028 printk(KERN_WARNING "Invalid skb_res: %d, cropping to 2\n", 2224 return -ENODEV;
2029 skb_res); 2225
2030 skb_res = 2; 2226 EMAC_CLK_INTERNAL;
2227 if (ocp_register_driver(&emac_driver)) {
2228 EMAC_CLK_EXTERNAL;
2229 ocp_unregister_driver(&emac_driver);
2230 mal_exit();
2231 return -ENODEV;
2031 } 2232 }
2233 EMAC_CLK_EXTERNAL;
2032 2234
2033 return ocp_register_driver(&emac_driver); 2235 emac_init_debug();
2236 return 0;
2034} 2237}
2035 2238
2036static void __exit emac_exit(void) 2239static void __exit emac_exit(void)
2037{ 2240{
2241 DBG(": exit" NL);
2038 ocp_unregister_driver(&emac_driver); 2242 ocp_unregister_driver(&emac_driver);
2243 mal_exit();
2244 emac_fini_debug();
2039} 2245}
2040 2246
2041module_init(emac_init); 2247module_init(emac_init);
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h
index 97e6e1ea8c89..e9b44d030ac3 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.h
+++ b/drivers/net/ibm_emac/ibm_emac_core.h
@@ -1,146 +1,221 @@
-/*
- * ibm_emac_core.h
- *
- * Ethernet driver for the built in ethernet on the IBM 405 PowerPC
- * processor.
- *
- * Armin Kuster akuster@mvista.com
- * Sept, 2001
- *
- * Original driver
- * Johnnie Peters
- * jpeters@mvista.com
- *
- * Copyright 2000 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _IBM_EMAC_CORE_H_
-#define _IBM_EMAC_CORE_H_
-
-#include <linux/netdevice.h>
-#include <asm/ocp.h>
-#include <asm/mmu.h>		/* For phys_addr_t */
-
-#include "ibm_emac.h"
-#include "ibm_emac_phy.h"
-#include "ibm_emac_rgmii.h"
-#include "ibm_emac_zmii.h"
-#include "ibm_emac_mal.h"
-#include "ibm_emac_tah.h"
-
+/*
+ * drivers/net/ibm_emac/ibm_emac_core.h
+ *
+ * Driver for PowerPC 4xx on-chip ethernet controller.
+ *
+ * Copyright (c) 2004, 2005 Zultys Technologies.
+ * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *
+ * Based on original work by
+ *      Armin Kuster <akuster@mvista.com>
+ *      Johnnie Peters <jpeters@mvista.com>
+ * Copyright 2000, 2001 MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifndef __IBM_EMAC_CORE_H_
+#define __IBM_EMAC_CORE_H_
+
+#include <linux/config.h>
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <asm/ocp.h>
+
+#include "ibm_emac.h"
+#include "ibm_emac_phy.h"
+#include "ibm_emac_zmii.h"
+#include "ibm_emac_rgmii.h"
+#include "ibm_emac_mal.h"
+#include "ibm_emac_tah.h"
+
36#ifndef CONFIG_IBM_EMAC_TXB 35#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
37#define NUM_TX_BUFF 64 36#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
38#define NUM_RX_BUFF 64
39#else
40#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
41#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
42#endif
43 37
44/* This does 16 byte alignment, exactly what we need. 38/* Simple sanity check */
45 * The packet length includes FCS, but we don't want to 39#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256
46 * include that when passing upstream as it messes up 40#error Invalid number of buffer descriptors (greater than 256)
47 * bridging applications.
48 */
49#ifndef CONFIG_IBM_EMAC_SKBRES
50#define SKB_RES 2
51#else
52#define SKB_RES CONFIG_IBM_EMAC_SKBRES
53#endif 41#endif
54 42
 55/* Note about alignment. alloc_skb() returns a cache line 43// XXX
56 * aligned buffer. However, dev_alloc_skb() will add 16 more 44#define EMAC_MIN_MTU 46
57 * bytes and "reserve" them, so our buffer will actually end 45#define EMAC_MAX_MTU 9000
58 * on a half cache line. What we do is to use directly 46
59 * alloc_skb, allocate 16 more bytes to match the total amount 47/* Maximum L2 header length (VLAN tagged, no FCS) */
60 * allocated by dev_alloc_skb(), but we don't reserve. 48#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4)
49
50/* RX BD size for the given MTU */
51static inline int emac_rx_size(int mtu)
52{
53 if (mtu > ETH_DATA_LEN)
54 return MAL_MAX_RX_SIZE;
55 else
56 return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
57}
58
59#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
60
61#define EMAC_RX_SKB_HEADROOM \
62 EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
63
64/* Size of RX skb for the given MTU */
65static inline int emac_rx_skb_size(int mtu)
66{
67 int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
68 return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
69}
70
71/* RX DMA sync size */
72static inline int emac_rx_sync_size(int mtu)
73{
74 return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
75}
76
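
[Editor's worked example, not part of the patch: how the sizing helpers above compose, assuming a 32-byte cache line — the real value comes from dma_get_cache_alignment().]

/* For the default MTU of 1500:
 *
 *   EMAC_MTU_OVERHEAD      = 6*2 + 2 + 4 = 18 (MACs, ethertype, VLAN tag)
 *   emac_rx_size(1500)     = mal_rx_size(1500 + 18), i.e. 1518 rounded up
 *                            to whatever granularity MAL requires
 *   emac_rx_skb_size(1500) = ALIGN(max(1518, rx_size) + 2, 32)
 *                            + EMAC_RX_SKB_HEADROOM
 *
 * The "+ 2" keeps the IP header word-aligned behind the 14-byte Ethernet
 * header — the same trick as the common skb_reserve(skb, 2) idiom.
 */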
 77/* Driver statistics are split into two parts to make them more cache friendly:
 78 * - normal statistics (packet count, etc)
 79 * - error statistics
 80 *
 81 * When statistics are requested by ethtool, these parts are concatenated,
 82 * the normal part goes first.
83 *
84 * Please, keep these structures in sync with emac_stats_keys.
61 */ 85 */
62#define MAX_NUM_BUF_DESC 255 86
63#define DESC_BUF_SIZE 4080 /* max 4096-16 */ 87/* Normal TX/RX Statistics */
64#define DESC_BUF_SIZE_REG (DESC_BUF_SIZE / 16) 88struct ibm_emac_stats {
65 89 u64 rx_packets;
66/* Transmitter timeout. */ 90 u64 rx_bytes;
67#define TX_TIMEOUT (2*HZ) 91 u64 tx_packets;
68 92 u64 tx_bytes;
69/* MDIO latency delay */ 93 u64 rx_packets_csum;
70#define MDIO_DELAY 250 94 u64 tx_packets_csum;
71 95};
72/* Power managment shift registers */ 96
73#define IBM_CPM_EMMII 0 /* Shift value for MII */ 97/* Error statistics */
74#define IBM_CPM_EMRX 1 /* Shift value for recv */ 98struct ibm_emac_error_stats {
75#define IBM_CPM_EMTX 2 /* Shift value for MAC */ 99 u64 tx_undo;
76#define IBM_CPM_EMAC(x) (((x)>>IBM_CPM_EMMII) | ((x)>>IBM_CPM_EMRX) | ((x)>>IBM_CPM_EMTX)) 100
77 101 /* Software RX Errors */
78#define ENET_HEADER_SIZE 14 102 u64 rx_dropped_stack;
79#define ENET_FCS_SIZE 4 103 u64 rx_dropped_oom;
80#define ENET_DEF_MTU_SIZE 1500 104 u64 rx_dropped_error;
81#define ENET_DEF_BUF_SIZE (ENET_DEF_MTU_SIZE + ENET_HEADER_SIZE + ENET_FCS_SIZE) 105 u64 rx_dropped_resize;
82#define EMAC_MIN_FRAME 64 106 u64 rx_dropped_mtu;
83#define EMAC_MAX_FRAME 9018 107 u64 rx_stopped;
84#define EMAC_MIN_MTU (EMAC_MIN_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) 108 /* BD reported RX errors */
85#define EMAC_MAX_MTU (EMAC_MAX_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) 109 u64 rx_bd_errors;
86 110 u64 rx_bd_overrun;
87#ifdef CONFIG_IBM_EMAC_ERRMSG 111 u64 rx_bd_bad_packet;
88void emac_serr_dump_0(struct net_device *dev); 112 u64 rx_bd_runt_packet;
89void emac_serr_dump_1(struct net_device *dev); 113 u64 rx_bd_short_event;
90void emac_err_dump(struct net_device *dev, int em0isr); 114 u64 rx_bd_alignment_error;
91void emac_phy_dump(struct net_device *); 115 u64 rx_bd_bad_fcs;
92void emac_desc_dump(struct net_device *); 116 u64 rx_bd_packet_too_long;
93void emac_mac_dump(struct net_device *); 117 u64 rx_bd_out_of_range;
94void emac_mal_dump(struct net_device *); 118 u64 rx_bd_in_range;
95#else 119 /* EMAC IRQ reported RX errors */
96#define emac_serr_dump_0(dev) do { } while (0) 120 u64 rx_parity;
97#define emac_serr_dump_1(dev) do { } while (0) 121 u64 rx_fifo_overrun;
98#define emac_err_dump(dev,x) do { } while (0) 122 u64 rx_overrun;
99#define emac_phy_dump(dev) do { } while (0) 123 u64 rx_bad_packet;
100#define emac_desc_dump(dev) do { } while (0) 124 u64 rx_runt_packet;
101#define emac_mac_dump(dev) do { } while (0) 125 u64 rx_short_event;
102#define emac_mal_dump(dev) do { } while (0) 126 u64 rx_alignment_error;
103#endif 127 u64 rx_bad_fcs;
128 u64 rx_packet_too_long;
129 u64 rx_out_of_range;
130 u64 rx_in_range;
131
132 /* Software TX Errors */
133 u64 tx_dropped;
134 /* BD reported TX errors */
135 u64 tx_bd_errors;
136 u64 tx_bd_bad_fcs;
137 u64 tx_bd_carrier_loss;
138 u64 tx_bd_excessive_deferral;
139 u64 tx_bd_excessive_collisions;
140 u64 tx_bd_late_collision;
141 u64 tx_bd_multple_collisions;
142 u64 tx_bd_single_collision;
143 u64 tx_bd_underrun;
144 u64 tx_bd_sqe;
145 /* EMAC IRQ reported TX errors */
146 u64 tx_parity;
147 u64 tx_underrun;
148 u64 tx_sqe;
149 u64 tx_errors;
150};
151
152#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct ibm_emac_stats) + \
153 sizeof(struct ibm_emac_error_stats)) \
154 / sizeof(u64))
104 155
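
Since both structures hold nothing but u64 counters, EMAC_ETHTOOL_STATS_COUNT is simply their combined size in u64 units, so the ethtool handler can fill the exported array with two back-to-back copies. That handler lives in the core file rather than this header; the sketch below is an assumption that merely follows from the layout rule stated in the comment above:

    /* Sketch (kernel context): concatenate normal stats, then error stats */
    static void emac_ethtool_get_stats(struct net_device *ndev,
                                       struct ethtool_stats *estats,
                                       u64 *tmp_stats)
    {
            struct ocp_enet_private *dev = ndev->priv;

            memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
            tmp_stats += sizeof(dev->stats) / sizeof(u64);
            memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
    }
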
105struct ocp_enet_private { 156struct ocp_enet_private {
106 struct sk_buff *tx_skb[NUM_TX_BUFF]; 157 struct net_device *ndev; /* 0 */
107 struct sk_buff *rx_skb[NUM_RX_BUFF]; 158 struct emac_regs *emacp;
108 struct mal_descriptor *tx_desc; 159
109 struct mal_descriptor *rx_desc; 160 struct mal_descriptor *tx_desc;
110 struct mal_descriptor *rx_dirty; 161 int tx_cnt;
111 struct net_device_stats stats; 162 int tx_slot;
112 int tx_cnt; 163 int ack_slot;
113 int rx_slot; 164
114 int dirty_rx; 165 struct mal_descriptor *rx_desc;
115 int tx_slot; 166 int rx_slot;
116 int ack_slot; 167 struct sk_buff *rx_sg_skb; /* 1 */
117 int rx_buffer_size; 168 int rx_skb_size;
118 169 int rx_sync_size;
119 struct mii_phy phy_mii; 170
120 int mii_phy_addr; 171 struct ibm_emac_stats stats;
121 int want_autoneg; 172 struct ocp_device *tah_dev;
122 int timer_ticks; 173
123 struct timer_list link_timer; 174 struct ibm_ocp_mal *mal;
124 struct net_device *mdio_dev; 175 struct mal_commac commac;
125 176
126 struct ocp_device *rgmii_dev; 177 struct sk_buff *tx_skb[NUM_TX_BUFF];
127 int rgmii_input; 178 struct sk_buff *rx_skb[NUM_RX_BUFF];
128 179
129 struct ocp_device *zmii_dev; 180 struct ocp_device *zmii_dev;
130 int zmii_input; 181 int zmii_input;
131 182 struct ocp_enet_private *mdio_dev;
132 struct ibm_ocp_mal *mal; 183 struct ocp_device *rgmii_dev;
133 int mal_tx_chan, mal_rx_chan; 184 int rgmii_input;
134 struct mal_commac commac; 185
135 186 struct ocp_def *def;
136 struct ocp_device *tah_dev; 187
137 188 struct mii_phy phy;
138 int opened; 189 struct timer_list link_timer;
139 int going_away; 190 int reset_failed;
140 int wol_irq; 191
141 emac_t *emacp; 192 struct ibm_emac_error_stats estats;
142 struct ocp_device *ocpdev; 193 struct net_device_stats nstats;
143 struct net_device *ndev; 194
144 spinlock_t lock; 195 struct device* ldev;
145}; 196};
146#endif /* _IBM_EMAC_CORE_H_ */ 197
198/* Ethtool get_regs complex data.
199 * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
200 * when available.
201 *
202 * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr,
203 * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers.
204 * Each register component is preceded with emac_ethtool_regs_subhdr.
 205 * Order of the optional headers follows their relative bit positions
206 * in emac_ethtool_regs_hdr.components
207 */
208#define EMAC_ETHTOOL_REGS_ZMII 0x00000001
209#define EMAC_ETHTOOL_REGS_RGMII 0x00000002
210#define EMAC_ETHTOOL_REGS_TAH 0x00000004
211
212struct emac_ethtool_regs_hdr {
213 u32 components;
214};
215
216struct emac_ethtool_regs_subhdr {
217 u32 version;
218 u32 index;
219};
220
221#endif /* __IBM_EMAC_CORE_H_ */
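
A consumer of this blob walks it as: one emac_ethtool_regs_hdr, then a MAL dump, then an EMAC dump, then any optional ZMII/RGMII/TAH dumps in the bit order above, each component preceded by an emac_ethtool_regs_subhdr. The per-component payload sizes depend on the subhdr version, so a generic userspace reader can at least enumerate what is present; a hedged sketch:

    #include <stdint.h>
    #include <stdio.h>

    struct emac_ethtool_regs_hdr { uint32_t components; };

    /* Report which optional components a get_regs blob carries */
    static void emac_regs_components(const void *blob)
    {
            const struct emac_ethtool_regs_hdr *hdr = blob;

            printf("MAL + EMAC%s%s%s\n",
                   hdr->components & 0x1 ? " + ZMII"  : "",
                   hdr->components & 0x2 ? " + RGMII" : "",
                   hdr->components & 0x4 ? " + TAH"   : "");
    }

    int main(void)
    {
            struct emac_ethtool_regs_hdr hdr = { .components = 0x1 | 0x4 };
            emac_regs_components(&hdr);     /* "MAL + EMAC + ZMII + TAH" */
            return 0;
    }
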
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c
index c8512046cf84..75d3b8639041 100644
--- a/drivers/net/ibm_emac/ibm_emac_debug.c
+++ b/drivers/net/ibm_emac/ibm_emac_debug.c
@@ -1,224 +1,213 @@
1/* 1/*
2 * ibm_ocp_debug.c 2 * drivers/net/ibm_emac/ibm_emac_debug.c
3 * 3 *
4 * This has all the debug routines that where in *_enet.c 4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 * 5 *
6 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies
7 * April , 2002 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
 9 * Copyright 2002 MontaVista Software Inc.
10 * 8 *
11 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your 11 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version. 12 * option) any later version.
13 *
15 */ 14 */
16
17#include <linux/config.h> 15#include <linux/config.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/sysrq.h>
20#include <asm/io.h> 21#include <asm/io.h>
21#include "ibm_ocp_mal.h"
22#include "ibm_ocp_zmii.h"
23#include "ibm_ocp_enet.h"
24 22
25extern int emac_phy_read(struct net_device *dev, int mii_id, int reg); 23#include "ibm_emac_core.h"
24
25static void emac_desc_dump(int idx, struct ocp_enet_private *p)
26{
27 int i;
28 printk("** EMAC%d TX BDs **\n"
29 " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
30 idx, p->tx_cnt, p->tx_slot, p->ack_slot);
31 for (i = 0; i < NUM_TX_BUFF / 2; ++i)
32 printk
33 ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
34 i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
35 p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
36 NUM_TX_BUFF / 2 + i,
37 p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
38 p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
39 p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
40 p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
41
42 printk("** EMAC%d RX BDs **\n"
43 " rx_slot = %d rx_stopped = %d rx_skb_size = %d rx_sync_size = %d\n"
44 " rx_sg_skb = 0x%p\n",
45 idx, p->rx_slot, p->commac.rx_stopped, p->rx_skb_size,
46 p->rx_sync_size, p->rx_sg_skb);
47 for (i = 0; i < NUM_RX_BUFF / 2; ++i)
48 printk
49 ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
50 i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
51 p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
52 NUM_RX_BUFF / 2 + i,
53 p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
54 p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
55 p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
56 p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
57}
58
59static void emac_mac_dump(int idx, struct ocp_enet_private *dev)
60{
61 struct emac_regs *p = dev->emacp;
62
63 printk("** EMAC%d registers **\n"
64 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
65 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
66 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n"
67 "IAHT: 0x%04x 0x%04x 0x%04x 0x%04x "
68 "GAHT: 0x%04x 0x%04x 0x%04x 0x%04x\n"
69 "LSA = %04x%08x IPGVR = 0x%04x\n"
70 "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
71 "OCTX = 0x%08x OCRX = 0x%08x IPCR = 0x%08x\n",
72 idx, in_be32(&p->mr0), in_be32(&p->mr1),
73 in_be32(&p->tmr0), in_be32(&p->tmr1),
74 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
75 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
76 in_be32(&p->vtci),
77 in_be32(&p->iaht1), in_be32(&p->iaht2), in_be32(&p->iaht3),
78 in_be32(&p->iaht4),
79 in_be32(&p->gaht1), in_be32(&p->gaht2), in_be32(&p->gaht3),
80 in_be32(&p->gaht4),
81 in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
82 in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
83 in_be32(&p->octx), in_be32(&p->ocrx), in_be32(&p->ipcr)
84 );
85
86 emac_desc_dump(idx, dev);
87}
88
89static void emac_mal_dump(struct ibm_ocp_mal *mal)
90{
91 struct ocp_func_mal_data *maldata = mal->def->additions;
92 int i;
93
94 printk("** MAL%d Registers **\n"
95 "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
96 "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
97 "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
98 mal->def->index,
99 get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
100 get_mal_dcrn(mal, MAL_IER),
101 get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
102 get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
103 get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
104 get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
105 );
106
107 printk("TX|");
108 for (i = 0; i < maldata->num_tx_chans; ++i) {
109 if (i && !(i % 4))
110 printk("\n ");
111 printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
112 }
113 printk("\nRX|");
114 for (i = 0; i < maldata->num_rx_chans; ++i) {
115 if (i && !(i % 4))
116 printk("\n ");
117 printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
118 }
119 printk("\n ");
120 for (i = 0; i < maldata->num_rx_chans; ++i) {
121 u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
122 if (i && !(i % 3))
123 printk("\n ");
124 printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
125 }
126 printk("\n");
127}
128
129static struct ocp_enet_private *__emacs[4];
130static struct ibm_ocp_mal *__mals[1];
26 131
27void emac_phy_dump(struct net_device *dev) 132void emac_dbg_register(int idx, struct ocp_enet_private *dev)
28{ 133{
29 struct ocp_enet_private *fep = dev->priv; 134 unsigned long flags;
30 unsigned long i; 135
31 uint data; 136 if (idx >= sizeof(__emacs) / sizeof(__emacs[0])) {
32 137 printk(KERN_WARNING
33 printk(KERN_DEBUG " Prepare for Phy dump....\n"); 138 "invalid index %d when registering EMAC for debugging\n",
34 for (i = 0; i < 0x1A; i++) { 139 idx);
35 data = emac_phy_read(dev, fep->mii_phy_addr, i); 140 return;
36 printk(KERN_DEBUG "Phy reg 0x%lx ==> %4x\n", i, data);
37 if (i == 0x07)
38 i = 0x0f;
39 } 141 }
142
143 local_irq_save(flags);
144 __emacs[idx] = dev;
145 local_irq_restore(flags);
40} 146}
41 147
42void emac_desc_dump(struct net_device *dev) 148void mal_dbg_register(int idx, struct ibm_ocp_mal *mal)
43{ 149{
44 struct ocp_enet_private *fep = dev->priv; 150 unsigned long flags;
45 int curr_slot; 151
46 152 if (idx >= sizeof(__mals) / sizeof(__mals[0])) {
47 printk(KERN_DEBUG 153 printk(KERN_WARNING
48 "dumping the receive descriptors: current slot is %d\n", 154 "invalid index %d when registering MAL for debugging\n",
49 fep->rx_slot); 155 idx);
50 for (curr_slot = 0; curr_slot < NUM_RX_BUFF; curr_slot++) { 156 return;
51 printk(KERN_DEBUG
52 "Desc %02d: status 0x%04x, length %3d, addr 0x%x\n",
53 curr_slot, fep->rx_desc[curr_slot].ctrl,
54 fep->rx_desc[curr_slot].data_len,
55 (unsigned int)fep->rx_desc[curr_slot].data_ptr);
56 } 157 }
158
159 local_irq_save(flags);
160 __mals[idx] = mal;
161 local_irq_restore(flags);
57} 162}
58 163
59void emac_mac_dump(struct net_device *dev) 164void emac_dbg_dump_all(void)
60{ 165{
61 struct ocp_enet_private *fep = dev->priv; 166 unsigned int i;
62 volatile emac_t *emacp = fep->emacp; 167 unsigned long flags;
63 168
64 printk(KERN_DEBUG "EMAC DEBUG ********** \n"); 169 local_irq_save(flags);
65 printk(KERN_DEBUG "EMAC_M0 ==> 0x%x\n", in_be32(&emacp->em0mr0)); 170
66 printk(KERN_DEBUG "EMAC_M1 ==> 0x%x\n", in_be32(&emacp->em0mr1)); 171 for (i = 0; i < sizeof(__mals) / sizeof(__mals[0]); ++i)
67 printk(KERN_DEBUG "EMAC_TXM0==> 0x%x\n", in_be32(&emacp->em0tmr0)); 172 if (__mals[i])
68 printk(KERN_DEBUG "EMAC_TXM1==> 0x%x\n", in_be32(&emacp->em0tmr1)); 173 emac_mal_dump(__mals[i]);
69 printk(KERN_DEBUG "EMAC_RXM ==> 0x%x\n", in_be32(&emacp->em0rmr)); 174
70 printk(KERN_DEBUG "EMAC_ISR ==> 0x%x\n", in_be32(&emacp->em0isr)); 175 for (i = 0; i < sizeof(__emacs) / sizeof(__emacs[0]); ++i)
71 printk(KERN_DEBUG "EMAC_IER ==> 0x%x\n", in_be32(&emacp->em0iser)); 176 if (__emacs[i])
72 printk(KERN_DEBUG "EMAC_IAH ==> 0x%x\n", in_be32(&emacp->em0iahr)); 177 emac_mac_dump(i, __emacs[i]);
73 printk(KERN_DEBUG "EMAC_IAL ==> 0x%x\n", in_be32(&emacp->em0ialr)); 178
74 printk(KERN_DEBUG "EMAC_VLAN_TPID_REG ==> 0x%x\n", 179 local_irq_restore(flags);
75 in_be32(&emacp->em0vtpid));
76} 180}
77 181
78void emac_mal_dump(struct net_device *dev) 182#if defined(CONFIG_MAGIC_SYSRQ)
183static void emac_sysrq_handler(int key, struct pt_regs *pt_regs,
184 struct tty_struct *tty)
79{ 185{
80 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 186 emac_dbg_dump_all();
81
82 printk(KERN_DEBUG " MAL DEBUG ********** \n");
83 printk(KERN_DEBUG " MCR ==> 0x%x\n",
84 (unsigned int)get_mal_dcrn(mal, DCRN_MALCR));
85 printk(KERN_DEBUG " ESR ==> 0x%x\n",
86 (unsigned int)get_mal_dcrn(mal, DCRN_MALESR));
87 printk(KERN_DEBUG " IER ==> 0x%x\n",
88 (unsigned int)get_mal_dcrn(mal, DCRN_MALIER));
89#ifdef CONFIG_40x
90 printk(KERN_DEBUG " DBR ==> 0x%x\n",
91 (unsigned int)get_mal_dcrn(mal, DCRN_MALDBR));
92#endif /* CONFIG_40x */
93 printk(KERN_DEBUG " TXCASR ==> 0x%x\n",
94 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCASR));
95 printk(KERN_DEBUG " TXCARR ==> 0x%x\n",
96 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCARR));
97 printk(KERN_DEBUG " TXEOBISR ==> 0x%x\n",
98 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXEOBISR));
99 printk(KERN_DEBUG " TXDEIR ==> 0x%x\n",
100 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXDEIR));
101 printk(KERN_DEBUG " RXCASR ==> 0x%x\n",
102 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCASR));
103 printk(KERN_DEBUG " RXCARR ==> 0x%x\n",
104 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCARR));
105 printk(KERN_DEBUG " RXEOBISR ==> 0x%x\n",
106 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXEOBISR));
107 printk(KERN_DEBUG " RXDEIR ==> 0x%x\n",
108 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXDEIR));
109 printk(KERN_DEBUG " TXCTP0R ==> 0x%x\n",
110 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP0R));
111 printk(KERN_DEBUG " TXCTP1R ==> 0x%x\n",
112 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP1R));
113 printk(KERN_DEBUG " TXCTP2R ==> 0x%x\n",
114 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP2R));
115 printk(KERN_DEBUG " TXCTP3R ==> 0x%x\n",
116 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP3R));
117 printk(KERN_DEBUG " RXCTP0R ==> 0x%x\n",
118 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP0R));
119 printk(KERN_DEBUG " RXCTP1R ==> 0x%x\n",
120 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP1R));
121 printk(KERN_DEBUG " RCBS0 ==> 0x%x\n",
122 (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS0));
123 printk(KERN_DEBUG " RCBS1 ==> 0x%x\n",
124 (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS1));
125} 187}
126 188
127void emac_serr_dump_0(struct net_device *dev) 189static struct sysrq_key_op emac_sysrq_op = {
190 .handler = emac_sysrq_handler,
191 .help_msg = "emaC",
192 .action_msg = "Show EMAC(s) status",
193};
194
195int __init emac_init_debug(void)
128{ 196{
129 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 197 return register_sysrq_key('c', &emac_sysrq_op);
130 unsigned long int mal_error, plb_error, plb_addr;
131
132 mal_error = get_mal_dcrn(mal, DCRN_MALESR);
133 printk(KERN_DEBUG "ppc405_eth_serr: %s channel %ld \n",
134 (mal_error & 0x40000000) ? "Receive" :
135 "Transmit", (mal_error & 0x3e000000) >> 25);
136 printk(KERN_DEBUG " ----- latched error -----\n");
137 if (mal_error & MALESR_DE)
138 printk(KERN_DEBUG " DE: descriptor error\n");
139 if (mal_error & MALESR_OEN)
140 printk(KERN_DEBUG " ONE: OPB non-fullword error\n");
141 if (mal_error & MALESR_OTE)
142 printk(KERN_DEBUG " OTE: OPB timeout error\n");
143 if (mal_error & MALESR_OSE)
144 printk(KERN_DEBUG " OSE: OPB slave error\n");
145
146 if (mal_error & MALESR_PEIN) {
147 plb_error = mfdcr(DCRN_PLB0_BESR);
148 printk(KERN_DEBUG
149 " PEIN: PLB error, PLB0_BESR is 0x%x\n",
150 (unsigned int)plb_error);
151 plb_addr = mfdcr(DCRN_PLB0_BEAR);
152 printk(KERN_DEBUG
153 " PEIN: PLB error, PLB0_BEAR is 0x%x\n",
154 (unsigned int)plb_addr);
155 }
156} 198}
157 199
158void emac_serr_dump_1(struct net_device *dev) 200void __exit emac_fini_debug(void)
159{ 201{
160 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 202 unregister_sysrq_key('c', &emac_sysrq_op);
161 int mal_error = get_mal_dcrn(mal, DCRN_MALESR);
162
163 printk(KERN_DEBUG " ----- cumulative errors -----\n");
164 if (mal_error & MALESR_DEI)
165 printk(KERN_DEBUG " DEI: descriptor error interrupt\n");
166 if (mal_error & MALESR_ONEI)
167 printk(KERN_DEBUG " OPB non-fullword error interrupt\n");
168 if (mal_error & MALESR_OTEI)
169 printk(KERN_DEBUG " OTEI: timeout error interrupt\n");
170 if (mal_error & MALESR_OSEI)
171 printk(KERN_DEBUG " OSEI: slave error interrupt\n");
172 if (mal_error & MALESR_PBEI)
173 printk(KERN_DEBUG " PBEI: PLB bus error interrupt\n");
174} 203}
175 204
176void emac_err_dump(struct net_device *dev, int em0isr) 205#else
206int __init emac_init_debug(void)
207{
208 return 0;
209}
210void __exit emac_fini_debug(void)
177{ 211{
178 printk(KERN_DEBUG "%s: on-chip ethernet error:\n", dev->name);
179
180 if (em0isr & EMAC_ISR_OVR)
181 printk(KERN_DEBUG " OVR: overrun\n");
182 if (em0isr & EMAC_ISR_PP)
183 printk(KERN_DEBUG " PP: control pause packet\n");
184 if (em0isr & EMAC_ISR_BP)
185 printk(KERN_DEBUG " BP: packet error\n");
186 if (em0isr & EMAC_ISR_RP)
187 printk(KERN_DEBUG " RP: runt packet\n");
188 if (em0isr & EMAC_ISR_SE)
189 printk(KERN_DEBUG " SE: short event\n");
190 if (em0isr & EMAC_ISR_ALE)
191 printk(KERN_DEBUG " ALE: odd number of nibbles in packet\n");
192 if (em0isr & EMAC_ISR_BFCS)
193 printk(KERN_DEBUG " BFCS: bad FCS\n");
194 if (em0isr & EMAC_ISR_PTLE)
195 printk(KERN_DEBUG " PTLE: oversized packet\n");
196 if (em0isr & EMAC_ISR_ORE)
197 printk(KERN_DEBUG
198 " ORE: packet length field > max allowed LLC\n");
199 if (em0isr & EMAC_ISR_IRE)
200 printk(KERN_DEBUG " IRE: In Range error\n");
201 if (em0isr & EMAC_ISR_DBDM)
202 printk(KERN_DEBUG " DBDM: xmit error or SQE\n");
203 if (em0isr & EMAC_ISR_DB0)
204 printk(KERN_DEBUG " DB0: xmit error or SQE on TX channel 0\n");
205 if (em0isr & EMAC_ISR_SE0)
206 printk(KERN_DEBUG
207 " SE0: Signal Quality Error test failure from TX channel 0\n");
208 if (em0isr & EMAC_ISR_TE0)
209 printk(KERN_DEBUG " TE0: xmit channel 0 aborted\n");
210 if (em0isr & EMAC_ISR_DB1)
211 printk(KERN_DEBUG " DB1: xmit error or SQE on TX channel \n");
212 if (em0isr & EMAC_ISR_SE1)
213 printk(KERN_DEBUG
214 " SE1: Signal Quality Error test failure from TX channel 1\n");
215 if (em0isr & EMAC_ISR_TE1)
216 printk(KERN_DEBUG " TE1: xmit channel 1 aborted\n");
217 if (em0isr & EMAC_ISR_MOS)
218 printk(KERN_DEBUG " MOS\n");
219 if (em0isr & EMAC_ISR_MOF)
220 printk(KERN_DEBUG " MOF\n");
221
222 emac_mac_dump(dev);
223 emac_mal_dump(dev);
224} 212}
213#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.h b/drivers/net/ibm_emac/ibm_emac_debug.h
new file mode 100644
index 000000000000..e85fbe0a8da9
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_debug.h
@@ -0,0 +1,63 @@
1/*
2 * drivers/net/ibm_emac/ibm_ocp_debug.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15#ifndef __IBM_EMAC_DEBUG_H_
16#define __IBM_EMAC_DEBUG_H_
17
18#include <linux/config.h>
19#include <linux/init.h>
20#include "ibm_emac_core.h"
21#include "ibm_emac_mal.h"
22
23#if defined(CONFIG_IBM_EMAC_DEBUG)
24void emac_dbg_register(int idx, struct ocp_enet_private *dev);
25void mal_dbg_register(int idx, struct ibm_ocp_mal *mal);
26int emac_init_debug(void) __init;
27void emac_fini_debug(void) __exit;
28void emac_dbg_dump_all(void);
29# define DBG_LEVEL 1
30#else
31# define emac_dbg_register(x,y) ((void)0)
32# define mal_dbg_register(x,y) ((void)0)
33# define emac_init_debug() ((void)0)
34# define emac_fini_debug() ((void)0)
35# define emac_dbg_dump_all() ((void)0)
36# define DBG_LEVEL 0
37#endif
38
39#if DBG_LEVEL > 0
40# define DBG(f,x...) printk("emac" f, ##x)
41# define MAL_DBG(f,x...) printk("mal" f, ##x)
42# define ZMII_DBG(f,x...) printk("zmii" f, ##x)
43# define RGMII_DBG(f,x...) printk("rgmii" f, ##x)
44# define NL "\n"
45#else
46# define DBG(f,x...) ((void)0)
47# define MAL_DBG(f,x...) ((void)0)
48# define ZMII_DBG(f,x...) ((void)0)
49# define RGMII_DBG(f,x...) ((void)0)
50#endif
51#if DBG_LEVEL > 1
52# define DBG2(f,x...) DBG(f, ##x)
53# define MAL_DBG2(f,x...) MAL_DBG(f, ##x)
54# define ZMII_DBG2(f,x...) ZMII_DBG(f, ##x)
55# define RGMII_DBG2(f,x...) RGMII_DBG(f, ##x)
56#else
57# define DBG2(f,x...) ((void)0)
58# define MAL_DBG2(f,x...) ((void)0)
59# define ZMII_DBG2(f,x...) ((void)0)
60# define RGMII_DBG2(f,x...) ((void)0)
61#endif
62
63#endif /* __IBM_EMAC_DEBUG_H_ */
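
The NL token exists so format strings can end with a newline via C string-literal pasting: DBG("%d: probe" NL, idx) expands to printk("emac" "%d: probe" "\n", idx), and the whole call compiles away when DBG_LEVEL is 0. A standalone illustration of the same pattern using printf:

    #include <stdio.h>

    #define DBG_LEVEL 1

    #if DBG_LEVEL > 0
    # define DBG(f, x...)  printf("emac" f, ##x)   /* gcc named-variadic macro */
    # define NL            "\n"
    #else
    # define DBG(f, x...)  ((void)0)
    #endif

    int main(void)
    {
            DBG("%d: probe" NL, 0);                /* prints "emac0: probe" */
            return 0;
    }
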
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
index e59f57f363ca..da88d43081cc 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -1,436 +1,565 @@
1/* 1/*
2 * ibm_ocp_mal.c 2 * drivers/net/ibm_emac/ibm_emac_mal.c
3 * 3 *
4 * Armin Kuster akuster@mvista.com 4 * Memory Access Layer (MAL) support
5 * Juen, 2002 5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
6 * 8 *
 7 * Copyright 2002 MontaVista Software Inc. 9 * Based on original work by
10 * Benjamin Herrenschmidt <benh@kernel.crashing.org>,
11 * David Gibson <hermes@gibson.dropbear.id.au>,
12 *
13 * Armin Kuster <akuster@mvista.com>
 14 * Copyright 2002 MontaVista Software Inc.
8 * 15 *
9 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your 18 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. 19 * option) any later version.
20 *
13 */ 21 */
14
15#include <linux/config.h> 22#include <linux/config.h>
16#include <linux/module.h> 23#include <linux/module.h>
17#include <linux/kernel.h> 24#include <linux/kernel.h>
18#include <linux/errno.h> 25#include <linux/errno.h>
19#include <linux/netdevice.h> 26#include <linux/netdevice.h>
20#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/interrupt.h>
21#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
22 30
23#include <asm/io.h>
24#include <asm/irq.h>
25#include <asm/ocp.h> 31#include <asm/ocp.h>
26 32
33#include "ibm_emac_core.h"
27#include "ibm_emac_mal.h" 34#include "ibm_emac_mal.h"
35#include "ibm_emac_debug.h"
28 36
29// Locking: Should we share a lock with the client ? The client could provide 37int __init mal_register_commac(struct ibm_ocp_mal *mal,
30// a lock pointer (optionally) in the commac structure... I don't think this is 38 struct mal_commac *commac)
31// really necessary though
32
33/* This lock protects the commac list. On today UP implementations, it's
34 * really only used as IRQ protection in mal_{register,unregister}_commac()
35 */
36static DEFINE_RWLOCK(mal_list_lock);
37
38int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
39{ 39{
40 unsigned long flags; 40 unsigned long flags;
41 local_irq_save(flags);
41 42
42 write_lock_irqsave(&mal_list_lock, flags); 43 MAL_DBG("%d: reg(%08x, %08x)" NL, mal->def->index,
44 commac->tx_chan_mask, commac->rx_chan_mask);
43 45
44 /* Don't let multiple commacs claim the same channel */ 46 /* Don't let multiple commacs claim the same channel(s) */
45 if ((mal->tx_chan_mask & commac->tx_chan_mask) || 47 if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
46 (mal->rx_chan_mask & commac->rx_chan_mask)) { 48 (mal->rx_chan_mask & commac->rx_chan_mask)) {
47 write_unlock_irqrestore(&mal_list_lock, flags); 49 local_irq_restore(flags);
50 printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
51 mal->def->index);
48 return -EBUSY; 52 return -EBUSY;
49 } 53 }
50 54
51 mal->tx_chan_mask |= commac->tx_chan_mask; 55 mal->tx_chan_mask |= commac->tx_chan_mask;
52 mal->rx_chan_mask |= commac->rx_chan_mask; 56 mal->rx_chan_mask |= commac->rx_chan_mask;
57 list_add(&commac->list, &mal->list);
53 58
54 list_add(&commac->list, &mal->commac); 59 local_irq_restore(flags);
55
56 write_unlock_irqrestore(&mal_list_lock, flags);
57
58 return 0; 60 return 0;
59} 61}
60 62
61int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac) 63void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
64 struct mal_commac *commac)
62{ 65{
63 unsigned long flags; 66 unsigned long flags;
67 local_irq_save(flags);
64 68
65 write_lock_irqsave(&mal_list_lock, flags); 69 MAL_DBG("%d: unreg(%08x, %08x)" NL, mal->def->index,
70 commac->tx_chan_mask, commac->rx_chan_mask);
66 71
67 mal->tx_chan_mask &= ~commac->tx_chan_mask; 72 mal->tx_chan_mask &= ~commac->tx_chan_mask;
68 mal->rx_chan_mask &= ~commac->rx_chan_mask; 73 mal->rx_chan_mask &= ~commac->rx_chan_mask;
69
70 list_del_init(&commac->list); 74 list_del_init(&commac->list);
71 75
72 write_unlock_irqrestore(&mal_list_lock, flags); 76 local_irq_restore(flags);
73
74 return 0;
75} 77}
76 78
77int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size) 79int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
78{ 80{
79 switch (channel) { 81 struct ocp_func_mal_data *maldata = mal->def->additions;
80 case 0: 82 BUG_ON(channel < 0 || channel >= maldata->num_rx_chans ||
81 set_mal_dcrn(mal, DCRN_MALRCBS0, size); 83 size > MAL_MAX_RX_SIZE);
82 break; 84
83#ifdef DCRN_MALRCBS1 85 MAL_DBG("%d: set_rbcs(%d, %lu)" NL, mal->def->index, channel, size);
84 case 1: 86
85 set_mal_dcrn(mal, DCRN_MALRCBS1, size); 87 if (size & 0xf) {
86 break; 88 printk(KERN_WARNING
87#endif 89 "mal%d: incorrect RX size %lu for the channel %d\n",
88#ifdef DCRN_MALRCBS2 90 mal->def->index, size, channel);
89 case 2:
90 set_mal_dcrn(mal, DCRN_MALRCBS2, size);
91 break;
92#endif
93#ifdef DCRN_MALRCBS3
94 case 3:
95 set_mal_dcrn(mal, DCRN_MALRCBS3, size);
96 break;
97#endif
98 default:
99 return -EINVAL; 91 return -EINVAL;
100 } 92 }
101 93
94 set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
102 return 0; 95 return 0;
103} 96}
104 97
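
The MAL_RCBS(n) registers hold the channel buffer size in units of 16 bytes, which is why the function rejects sizes with any of the low four bits set and writes size >> 4. A trivial standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned long size = 1520;      /* e.g. the RX size for a 1500 MTU */
            if (size & 0xf)
                    return 1;               /* mal_set_rcbs() would -EINVAL this */
            printf("RCBS = %lu\n", size >> 4);  /* 95, i.e. 95 * 16 = 1520 bytes */
            return 0;
    }
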
105static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs) 98int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel)
106{ 99{
107 struct ibm_ocp_mal *mal = dev_instance; 100 struct ocp_func_mal_data *maldata = mal->def->additions;
108 unsigned long mal_error; 101 BUG_ON(channel < 0 || channel >= maldata->num_tx_chans);
102 return channel * NUM_TX_BUFF;
103}
109 104
110 /* 105int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel)
111 * This SERR applies to one of the devices on the MAL, here we charge 106{
112 * it against the first EMAC registered for the MAL. 107 struct ocp_func_mal_data *maldata = mal->def->additions;
113 */ 108 BUG_ON(channel < 0 || channel >= maldata->num_rx_chans);
109 return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
110}
114 111
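
All descriptor rings now share one coherent allocation: every channel's TX ring first, then every RX ring, which is exactly what these two offset helpers encode and what mal_probe() below feeds into MAL_TXCTPR/MAL_RXCTPR. A sketch of the resulting address layout, with ring sizes and channel counts chosen only for illustration:

    #include <stdio.h>

    #define NUM_TX_BUFF 64   /* assumed ring sizes; the real values are Kconfig-driven */
    #define NUM_RX_BUFF 64
    #define BD_SIZE     8    /* sizeof(struct mal_descriptor): u16 + u16 + u32 */

    int main(void)
    {
            int num_tx = 2, num_rx = 2, ch;  /* assumed 2 TX / 2 RX channels */

            for (ch = 0; ch < num_tx; ++ch)  /* mal_tx_bd_offset() */
                    printf("TXCTPR%d = bd_dma + %d\n", ch,
                           BD_SIZE * (ch * NUM_TX_BUFF));
            for (ch = 0; ch < num_rx; ++ch)  /* mal_rx_bd_offset() */
                    printf("RXCTPR%d = bd_dma + %d\n", ch,
                           BD_SIZE * (num_tx * NUM_TX_BUFF + ch * NUM_RX_BUFF));
            return 0;
    }
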
115 mal_error = get_mal_dcrn(mal, DCRN_MALESR); 112void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel)
113{
114 local_bh_disable();
115 MAL_DBG("%d: enable_tx(%d)" NL, mal->def->index, channel);
116 set_mal_dcrn(mal, MAL_TXCASR,
117 get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
118 local_bh_enable();
119}
116 120
117 printk(KERN_ERR "%s: System Error (MALESR=%lx)\n", 121void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel)
118 "MAL" /* FIXME: get the name right */ , mal_error); 122{
123 set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
124 MAL_DBG("%d: disable_tx(%d)" NL, mal->def->index, channel);
125}
119 126
120 /* FIXME: decipher error */ 127void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel)
121 /* DIXME: distribute to commacs, if possible */ 128{
129 local_bh_disable();
130 MAL_DBG("%d: enable_rx(%d)" NL, mal->def->index, channel);
131 set_mal_dcrn(mal, MAL_RXCASR,
132 get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
133 local_bh_enable();
134}
122 135
123 /* Clear the error status register */ 136void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel)
124 set_mal_dcrn(mal, DCRN_MALESR, mal_error); 137{
138 set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
139 MAL_DBG("%d: disable_rx(%d)" NL, mal->def->index, channel);
140}
125 141
126 return IRQ_HANDLED; 142void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac)
143{
144 local_bh_disable();
145 MAL_DBG("%d: poll_add(%p)" NL, mal->def->index, commac);
146 list_add_tail(&commac->poll_list, &mal->poll_list);
147 local_bh_enable();
127} 148}
128 149
129static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs) 150void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac)
151{
152 local_bh_disable();
153 MAL_DBG("%d: poll_del(%p)" NL, mal->def->index, commac);
154 list_del(&commac->poll_list);
155 local_bh_enable();
156}
157
158/* synchronized by mal_poll() */
159static inline void mal_enable_eob_irq(struct ibm_ocp_mal *mal)
160{
161 MAL_DBG2("%d: enable_irq" NL, mal->def->index);
162 set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
163}
164
165/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
166static inline void mal_disable_eob_irq(struct ibm_ocp_mal *mal)
167{
168 set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
169 MAL_DBG2("%d: disable_irq" NL, mal->def->index);
170}
171
172static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
130{ 173{
131 struct ibm_ocp_mal *mal = dev_instance; 174 struct ibm_ocp_mal *mal = dev_instance;
132 struct list_head *l; 175 u32 esr = get_mal_dcrn(mal, MAL_ESR);
133 unsigned long isr;
134 176
135 isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR); 177 /* Clear the error status register */
136 set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr); 178 set_mal_dcrn(mal, MAL_ESR, esr);
137 179
138 read_lock(&mal_list_lock); 180 MAL_DBG("%d: SERR %08x" NL, mal->def->index, esr);
139 list_for_each(l, &mal->commac) {
140 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
141 181
142 if (isr & mc->tx_chan_mask) { 182 if (esr & MAL_ESR_EVB) {
143 mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask); 183 if (esr & MAL_ESR_DE) {
184 /* We ignore Descriptor error,
185 * TXDE or RXDE interrupt will be generated anyway.
186 */
187 return IRQ_HANDLED;
144 } 188 }
189
190 if (esr & MAL_ESR_PEIN) {
191 /* PLB error, it's probably buggy hardware or
192 * incorrect physical address in BD (i.e. bug)
193 */
194 if (net_ratelimit())
195 printk(KERN_ERR
196 "mal%d: system error, PLB (ESR = 0x%08x)\n",
197 mal->def->index, esr);
198 return IRQ_HANDLED;
199 }
200
201 /* OPB error, it's probably buggy hardware or incorrect EBC setup */
202 if (net_ratelimit())
203 printk(KERN_ERR
204 "mal%d: system error, OPB (ESR = 0x%08x)\n",
205 mal->def->index, esr);
145 } 206 }
146 read_unlock(&mal_list_lock); 207 return IRQ_HANDLED;
208}
209
210static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)
211{
212 if (likely(netif_rx_schedule_prep(&mal->poll_dev))) {
213 MAL_DBG2("%d: schedule_poll" NL, mal->def->index);
214 mal_disable_eob_irq(mal);
215 __netif_rx_schedule(&mal->poll_dev);
216 } else
217 MAL_DBG2("%d: already in poll" NL, mal->def->index);
218}
147 219
220static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
221{
222 struct ibm_ocp_mal *mal = dev_instance;
223 u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
224 MAL_DBG2("%d: txeob %08x" NL, mal->def->index, r);
225 mal_schedule_poll(mal);
226 set_mal_dcrn(mal, MAL_TXEOBISR, r);
148 return IRQ_HANDLED; 227 return IRQ_HANDLED;
149} 228}
150 229
151static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs) 230static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
152{ 231{
153 struct ibm_ocp_mal *mal = dev_instance; 232 struct ibm_ocp_mal *mal = dev_instance;
154 struct list_head *l; 233 u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
155 unsigned long isr; 234 MAL_DBG2("%d: rxeob %08x" NL, mal->def->index, r);
235 mal_schedule_poll(mal);
236 set_mal_dcrn(mal, MAL_RXEOBISR, r);
237 return IRQ_HANDLED;
238}
156 239
157 isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR); 240static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
158 set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr); 241{
242 struct ibm_ocp_mal *mal = dev_instance;
243 u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
244 set_mal_dcrn(mal, MAL_TXDEIR, deir);
159 245
160 read_lock(&mal_list_lock); 246 MAL_DBG("%d: txde %08x" NL, mal->def->index, deir);
161 list_for_each(l, &mal->commac) {
162 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
163 247
164 if (isr & mc->rx_chan_mask) { 248 if (net_ratelimit())
165 mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask); 249 printk(KERN_ERR
166 } 250 "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
167 } 251 mal->def->index, deir);
168 read_unlock(&mal_list_lock);
169 252
170 return IRQ_HANDLED; 253 return IRQ_HANDLED;
171} 254}
172 255
173static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs) 256static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
174{ 257{
175 struct ibm_ocp_mal *mal = dev_instance; 258 struct ibm_ocp_mal *mal = dev_instance;
176 struct list_head *l; 259 struct list_head *l;
177 unsigned long deir; 260 u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);
178 261
179 deir = get_mal_dcrn(mal, DCRN_MALTXDEIR); 262 MAL_DBG("%d: rxde %08x" NL, mal->def->index, deir);
180 263
181 /* FIXME: print which MAL correctly */ 264 list_for_each(l, &mal->list) {
182 printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
183 "MAL", deir);
184
185 read_lock(&mal_list_lock);
186 list_for_each(l, &mal->commac) {
187 struct mal_commac *mc = list_entry(l, struct mal_commac, list); 265 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
188 266 if (deir & mc->rx_chan_mask) {
189 if (deir & mc->tx_chan_mask) { 267 mc->rx_stopped = 1;
190 mc->ops->txde(mc->dev, deir & mc->tx_chan_mask); 268 mc->ops->rxde(mc->dev);
191 } 269 }
192 } 270 }
193 read_unlock(&mal_list_lock); 271
272 mal_schedule_poll(mal);
273 set_mal_dcrn(mal, MAL_RXDEIR, deir);
194 274
195 return IRQ_HANDLED; 275 return IRQ_HANDLED;
196} 276}
197 277
198/* 278static int mal_poll(struct net_device *ndev, int *budget)
199 * This interrupt should be very rare at best. This occurs when
200 * the hardware has a problem with the receive descriptors. The manual
201 * states that it occurs when the hardware cannot the receive descriptor
202 * empty bit is not set. The recovery mechanism will be to
203 * traverse through the descriptors, handle any that are marked to be
204 * handled and reinitialize each along the way. At that point the driver
205 * will be restarted.
206 */
207static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
208{ 279{
209 struct ibm_ocp_mal *mal = dev_instance; 280 struct ibm_ocp_mal *mal = ndev->priv;
210 struct list_head *l; 281 struct list_head *l;
211 unsigned long deir; 282 int rx_work_limit = min(ndev->quota, *budget), received = 0, done;
212 283
213 deir = get_mal_dcrn(mal, DCRN_MALRXDEIR); 284 MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget,
285 rx_work_limit);
286 again:
287 /* Process TX skbs */
288 list_for_each(l, &mal->poll_list) {
289 struct mal_commac *mc =
290 list_entry(l, struct mal_commac, poll_list);
291 mc->ops->poll_tx(mc->dev);
292 }
214 293
215 /* 294 /* Process RX skbs.
216 * This really is needed. This case encountered in stress testing. 295 * We _might_ need something more smart here to enforce polling fairness.
217 */ 296 */
218 if (deir == 0) 297 list_for_each(l, &mal->poll_list) {
219 return IRQ_HANDLED; 298 struct mal_commac *mc =
220 299 list_entry(l, struct mal_commac, poll_list);
221 /* FIXME: print which MAL correctly */ 300 int n = mc->ops->poll_rx(mc->dev, rx_work_limit);
222 printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n", 301 if (n) {
223 "MAL", deir); 302 received += n;
224 303 rx_work_limit -= n;
225 read_lock(&mal_list_lock); 304 if (rx_work_limit <= 0) {
226 list_for_each(l, &mal->commac) { 305 done = 0;
227 struct mal_commac *mc = list_entry(l, struct mal_commac, list); 306 goto more_work; // XXX What if this is the last one ?
307 }
308 }
309 }
228 310
229 if (deir & mc->rx_chan_mask) { 311 /* We need to disable IRQs to protect from RXDE IRQ here */
230 mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask); 312 local_irq_disable();
313 __netif_rx_complete(ndev);
314 mal_enable_eob_irq(mal);
315 local_irq_enable();
316
317 done = 1;
318
319 /* Check for "rotting" packet(s) */
320 list_for_each(l, &mal->poll_list) {
321 struct mal_commac *mc =
322 list_entry(l, struct mal_commac, poll_list);
323 if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) {
324 MAL_DBG2("%d: rotting packet" NL, mal->def->index);
325 if (netif_rx_reschedule(ndev, received))
326 mal_disable_eob_irq(mal);
327 else
328 MAL_DBG2("%d: already in poll list" NL,
329 mal->def->index);
330
331 if (rx_work_limit > 0)
332 goto again;
333 else
334 goto more_work;
231 } 335 }
336 mc->ops->poll_tx(mc->dev);
232 } 337 }
233 read_unlock(&mal_list_lock);
234 338
235 return IRQ_HANDLED; 339 more_work:
340 ndev->quota -= received;
341 *budget -= received;
342
343 MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget,
344 done ? 0 : 1);
345 return done ? 0 : 1;
346}
347
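
mal_poll() implements the 2.6-era NAPI contract: the callback gets a shared *budget, charges received packets against both *budget and ndev->quota, returns 1 while work remains, and returns 0 only after completing the device with the end-of-buffer interrupt re-enabled. The MAL embeds a fake net_device (poll_dev) solely to ride this machinery, since one MAL serves several EMACs. The canonical skeleton of that contract, with device specifics stubbed out as hypothetical helpers:

    /* Old-style (pre-2.6.24) NAPI poll skeleton; the device_* calls are stubs */
    static int xxx_poll(struct net_device *ndev, int *budget)
    {
            int limit = min(ndev->quota, *budget);
            int received = device_rx(ndev, limit);  /* process <= limit packets */

            ndev->quota -= received;
            *budget -= received;

            if (received < limit) {
                    local_irq_disable();            /* close race with new IRQs */
                    __netif_rx_complete(ndev);
                    device_enable_rx_irq(ndev);
                    local_irq_enable();
                    return 0;                       /* done; off the poll list */
            }
            return 1;                               /* more work; poll again */
    }
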
348static void mal_reset(struct ibm_ocp_mal *mal)
349{
350 int n = 10;
351 MAL_DBG("%d: reset" NL, mal->def->index);
352
353 set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);
354
355 /* Wait for reset to complete (1 system clock) */
356 while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
357 --n;
358
359 if (unlikely(!n))
360 printk(KERN_ERR "mal%d: reset timeout\n", mal->def->index);
361}
362
363int mal_get_regs_len(struct ibm_ocp_mal *mal)
364{
365 return sizeof(struct emac_ethtool_regs_subhdr) +
366 sizeof(struct ibm_mal_regs);
367}
368
369void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf)
370{
371 struct emac_ethtool_regs_subhdr *hdr = buf;
372 struct ibm_mal_regs *regs = (struct ibm_mal_regs *)(hdr + 1);
373 struct ocp_func_mal_data *maldata = mal->def->additions;
374 int i;
375
376 hdr->version = MAL_VERSION;
377 hdr->index = mal->def->index;
378
379 regs->tx_count = maldata->num_tx_chans;
380 regs->rx_count = maldata->num_rx_chans;
381
382 regs->cfg = get_mal_dcrn(mal, MAL_CFG);
383 regs->esr = get_mal_dcrn(mal, MAL_ESR);
384 regs->ier = get_mal_dcrn(mal, MAL_IER);
385 regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
386 regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
387 regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
388 regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
389 regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
390 regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
391 regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
392 regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);
393
394 for (i = 0; i < regs->tx_count; ++i)
395 regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));
396
397 for (i = 0; i < regs->rx_count; ++i) {
398 regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
399 regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
400 }
401 return regs + 1;
236} 402}
237 403
238static int __init mal_probe(struct ocp_device *ocpdev) 404static int __init mal_probe(struct ocp_device *ocpdev)
239{ 405{
240 struct ibm_ocp_mal *mal = NULL; 406 struct ibm_ocp_mal *mal;
241 struct ocp_func_mal_data *maldata; 407 struct ocp_func_mal_data *maldata;
242 int err = 0; 408 int err = 0, i, bd_size;
409
410 MAL_DBG("%d: probe" NL, ocpdev->def->index);
243 411
244 maldata = (struct ocp_func_mal_data *)ocpdev->def->additions; 412 maldata = ocpdev->def->additions;
245 if (maldata == NULL) { 413 if (maldata == NULL) {
246 printk(KERN_ERR "mal%d: Missing additional datas !\n", 414 printk(KERN_ERR "mal%d: missing additional data!\n",
247 ocpdev->def->index); 415 ocpdev->def->index);
248 return -ENODEV; 416 return -ENODEV;
249 } 417 }
250 418
251 mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL); 419 mal = kzalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
252 if (mal == NULL) { 420 if (!mal) {
253 printk(KERN_ERR 421 printk(KERN_ERR
254 "mal%d: Out of memory allocating MAL structure !\n", 422 "mal%d: out of memory allocating MAL structure!\n",
255 ocpdev->def->index); 423 ocpdev->def->index);
256 return -ENOMEM; 424 return -ENOMEM;
257 } 425 }
258 memset(mal, 0, sizeof(*mal)); 426 mal->dcrbase = maldata->dcr_base;
259 427 mal->def = ocpdev->def;
260 switch (ocpdev->def->index) {
261 case 0:
262 mal->dcrbase = DCRN_MAL_BASE;
263 break;
264#ifdef DCRN_MAL1_BASE
265 case 1:
266 mal->dcrbase = DCRN_MAL1_BASE;
267 break;
268#endif
269 default:
270 BUG();
271 }
272
273 /**************************/
274 428
275 INIT_LIST_HEAD(&mal->commac); 429 INIT_LIST_HEAD(&mal->poll_list);
430 set_bit(__LINK_STATE_START, &mal->poll_dev.state);
431 mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT;
432 mal->poll_dev.poll = mal_poll;
433 mal->poll_dev.priv = mal;
434 atomic_set(&mal->poll_dev.refcnt, 1);
276 435
277 set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF); 436 INIT_LIST_HEAD(&mal->list);
278 set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);
279 437
280 set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR); /* 384 */ 438 /* Load power-on reset defaults */
281 /* FIXME: Add delay */ 439 mal_reset(mal);
282 440
283 /* Set the MAL configuration register */ 441 /* Set the MAL configuration register */
284 set_mal_dcrn(mal, DCRN_MALCR, 442 set_mal_dcrn(mal, MAL_CFG, MAL_CFG_DEFAULT | MAL_CFG_PLBB |
285 MALCR_PLBB | MALCR_OPBBL | MALCR_LEA | 443 MAL_CFG_OPBBL | MAL_CFG_LEA);
286 MALCR_PLBLT_DEFAULT); 444
287 445 mal_enable_eob_irq(mal);
288 /* It would be nice to allocate buffers separately for each 446
289 * channel, but we can't because the channels share the upper 447 /* Allocate space for BD rings */
290 * 13 bits of address lines. Each channels buffer must also 448 BUG_ON(maldata->num_tx_chans <= 0 || maldata->num_tx_chans > 32);
291 * be 4k aligned, so we allocate 4k for each channel. This is 449 BUG_ON(maldata->num_rx_chans <= 0 || maldata->num_rx_chans > 32);
292 * inefficient FIXME: do better, if possible */ 450 bd_size = sizeof(struct mal_descriptor) *
293 mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev, 451 (NUM_TX_BUFF * maldata->num_tx_chans +
294 MAL_DT_ALIGN * 452 NUM_RX_BUFF * maldata->num_rx_chans);
295 maldata->num_tx_chans, 453 mal->bd_virt =
296 &mal->tx_phys_addr, GFP_KERNEL); 454 dma_alloc_coherent(&ocpdev->dev, bd_size, &mal->bd_dma, GFP_KERNEL);
297 if (mal->tx_virt_addr == NULL) { 455
456 if (!mal->bd_virt) {
298 printk(KERN_ERR 457 printk(KERN_ERR
299 "mal%d: Out of memory allocating MAL descriptors !\n", 458 "mal%d: out of memory allocating RX/TX descriptors!\n",
300 ocpdev->def->index); 459 mal->def->index);
301 err = -ENOMEM; 460 err = -ENOMEM;
302 goto fail; 461 goto fail;
303 } 462 }
463 memset(mal->bd_virt, 0, bd_size);
304 464
305 /* God, oh, god, I hate DCRs */ 465 for (i = 0; i < maldata->num_tx_chans; ++i)
306 set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr); 466 set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
307#ifdef DCRN_MALTXCTP1R 467 sizeof(struct mal_descriptor) *
308 if (maldata->num_tx_chans > 1) 468 mal_tx_bd_offset(mal, i));
309 set_mal_dcrn(mal, DCRN_MALTXCTP1R, 469
310 mal->tx_phys_addr + MAL_DT_ALIGN); 470 for (i = 0; i < maldata->num_rx_chans; ++i)
311#endif /* DCRN_MALTXCTP1R */ 471 set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
312#ifdef DCRN_MALTXCTP2R 472 sizeof(struct mal_descriptor) *
313 if (maldata->num_tx_chans > 2) 473 mal_rx_bd_offset(mal, i));
314 set_mal_dcrn(mal, DCRN_MALTXCTP2R,
315 mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
316#endif /* DCRN_MALTXCTP2R */
317#ifdef DCRN_MALTXCTP3R
318 if (maldata->num_tx_chans > 3)
319 set_mal_dcrn(mal, DCRN_MALTXCTP3R,
320 mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
321#endif /* DCRN_MALTXCTP3R */
322#ifdef DCRN_MALTXCTP4R
323 if (maldata->num_tx_chans > 4)
324 set_mal_dcrn(mal, DCRN_MALTXCTP4R,
325 mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
326#endif /* DCRN_MALTXCTP4R */
327#ifdef DCRN_MALTXCTP5R
328 if (maldata->num_tx_chans > 5)
329 set_mal_dcrn(mal, DCRN_MALTXCTP5R,
330 mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
331#endif /* DCRN_MALTXCTP5R */
332#ifdef DCRN_MALTXCTP6R
333 if (maldata->num_tx_chans > 6)
334 set_mal_dcrn(mal, DCRN_MALTXCTP6R,
335 mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
336#endif /* DCRN_MALTXCTP6R */
337#ifdef DCRN_MALTXCTP7R
338 if (maldata->num_tx_chans > 7)
339 set_mal_dcrn(mal, DCRN_MALTXCTP7R,
340 mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
341#endif /* DCRN_MALTXCTP7R */
342
343 mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
344 MAL_DT_ALIGN *
345 maldata->num_rx_chans,
346 &mal->rx_phys_addr, GFP_KERNEL);
347
348 set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
349#ifdef DCRN_MALRXCTP1R
350 if (maldata->num_rx_chans > 1)
351 set_mal_dcrn(mal, DCRN_MALRXCTP1R,
352 mal->rx_phys_addr + MAL_DT_ALIGN);
353#endif /* DCRN_MALRXCTP1R */
354#ifdef DCRN_MALRXCTP2R
355 if (maldata->num_rx_chans > 2)
356 set_mal_dcrn(mal, DCRN_MALRXCTP2R,
357 mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
358#endif /* DCRN_MALRXCTP2R */
359#ifdef DCRN_MALRXCTP3R
360 if (maldata->num_rx_chans > 3)
361 set_mal_dcrn(mal, DCRN_MALRXCTP3R,
362 mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
363#endif /* DCRN_MALRXCTP3R */
364 474
365 err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal); 475 err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
366 if (err) 476 if (err)
367 goto fail; 477 goto fail2;
368 err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal); 478 err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal);
369 if (err) 479 if (err)
370 goto fail; 480 goto fail3;
371 err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); 481 err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
372 if (err) 482 if (err)
373 goto fail; 483 goto fail4;
374 err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal); 484 err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
375 if (err) 485 if (err)
376 goto fail; 486 goto fail5;
377 err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); 487 err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
378 if (err) 488 if (err)
379 goto fail; 489 goto fail6;
380 490
381 set_mal_dcrn(mal, DCRN_MALIER, 491 /* Enable all MAL SERR interrupt sources */
382 MALIER_DE | MALIER_NE | MALIER_TE | 492 set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
383 MALIER_OPBE | MALIER_PLBE);
384 493
385 /* Advertise me to the rest of the world */ 494 /* Advertise this instance to the rest of the world */
386 ocp_set_drvdata(ocpdev, mal); 495 ocp_set_drvdata(ocpdev, mal);
387 496
388 printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n", 497 mal_dbg_register(mal->def->index, mal);
389 ocpdev->def->index, maldata->num_tx_chans,
390 maldata->num_rx_chans);
391 498
499 printk(KERN_INFO "mal%d: initialized, %d TX channels, %d RX channels\n",
500 mal->def->index, maldata->num_tx_chans, maldata->num_rx_chans);
392 return 0; 501 return 0;
393 502
503 fail6:
504 free_irq(maldata->rxde_irq, mal);
505 fail5:
506 free_irq(maldata->txeob_irq, mal);
507 fail4:
508 free_irq(maldata->txde_irq, mal);
509 fail3:
510 free_irq(maldata->serr_irq, mal);
511 fail2:
512 dma_free_coherent(&ocpdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
394 fail: 513 fail:
395 /* FIXME: dispose requested IRQs ! */ 514 kfree(mal);
396 if (err && mal)
397 kfree(mal);
398 return err; 515 return err;
399} 516}
400 517
401static void __exit mal_remove(struct ocp_device *ocpdev) 518static void __exit mal_remove(struct ocp_device *ocpdev)
402{ 519{
403 struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev); 520 struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
404 struct ocp_func_mal_data *maldata = ocpdev->def->additions; 521 struct ocp_func_mal_data *maldata = mal->def->additions;
522
523 MAL_DBG("%d: remove" NL, mal->def->index);
405 524
 406 BUG_ON(!maldata); 525 /* Synchronize with scheduled polling,
526 stolen from net/core/dev.c:dev_close()
527 */
528 clear_bit(__LINK_STATE_START, &mal->poll_dev.state);
529 netif_poll_disable(&mal->poll_dev);
530
531 if (!list_empty(&mal->list)) {
532 /* This is *very* bad */
533 printk(KERN_EMERG
534 "mal%d: commac list is not empty on remove!\n",
535 mal->def->index);
536 }
407 537
408 ocp_set_drvdata(ocpdev, NULL); 538 ocp_set_drvdata(ocpdev, NULL);
409 539
410 /* FIXME: shut down the MAL, deal with dependency with emac */
411 free_irq(maldata->serr_irq, mal); 540 free_irq(maldata->serr_irq, mal);
412 free_irq(maldata->txde_irq, mal); 541 free_irq(maldata->txde_irq, mal);
413 free_irq(maldata->txeob_irq, mal); 542 free_irq(maldata->txeob_irq, mal);
414 free_irq(maldata->rxde_irq, mal); 543 free_irq(maldata->rxde_irq, mal);
415 free_irq(maldata->rxeob_irq, mal); 544 free_irq(maldata->rxeob_irq, mal);
416 545
417 if (mal->tx_virt_addr) 546 mal_reset(mal);
418 dma_free_coherent(&ocpdev->dev,
419 MAL_DT_ALIGN * maldata->num_tx_chans,
420 mal->tx_virt_addr, mal->tx_phys_addr);
421 547
422 if (mal->rx_virt_addr) 548 mal_dbg_register(mal->def->index, NULL);
423 dma_free_coherent(&ocpdev->dev, 549
424 MAL_DT_ALIGN * maldata->num_rx_chans, 550 dma_free_coherent(&ocpdev->dev,
425 mal->rx_virt_addr, mal->rx_phys_addr); 551 sizeof(struct mal_descriptor) *
552 (NUM_TX_BUFF * maldata->num_tx_chans +
553 NUM_RX_BUFF * maldata->num_rx_chans), mal->bd_virt,
554 mal->bd_dma);
426 555
427 kfree(mal); 556 kfree(mal);
428} 557}
429 558
430/* Structure for a device driver */ 559/* Structure for a device driver */
431static struct ocp_device_id mal_ids[] = { 560static struct ocp_device_id mal_ids[] = {
432 {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL}, 561 { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_MAL },
433 {.vendor = OCP_VENDOR_INVALID} 562 { .vendor = OCP_VENDOR_INVALID}
434}; 563};
435 564
436static struct ocp_driver mal_driver = { 565static struct ocp_driver mal_driver = {
@@ -441,23 +570,14 @@ static struct ocp_driver mal_driver = {
441 .remove = mal_remove, 570 .remove = mal_remove,
442}; 571};
443 572
444static int __init init_mals(void) 573int __init mal_init(void)
445{ 574{
446 int rc; 575 MAL_DBG(": init" NL);
447 576 return ocp_register_driver(&mal_driver);
448 rc = ocp_register_driver(&mal_driver);
449 if (rc < 0) {
450 ocp_unregister_driver(&mal_driver);
451 return -ENODEV;
452 }
453
454 return 0;
455} 577}
456 578
457static void __exit exit_mals(void) 579void __exit mal_exit(void)
458{ 580{
581 MAL_DBG(": exit" NL);
459 ocp_unregister_driver(&mal_driver); 582 ocp_unregister_driver(&mal_driver);
460} 583}
461
462module_init(init_mals);
463module_exit(exit_mals);
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index dd9f0dabc6e0..15b0bdae26ac 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -1,131 +1,267 @@
1#ifndef _IBM_EMAC_MAL_H 1/*
2#define _IBM_EMAC_MAL_H 2 * drivers/net/ibm_emac/ibm_emac_mal.h
3 *
4 * Memory Access Layer (MAL) support
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
 11 * Copyright 2002 MontaVista Software Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#ifndef __IBM_EMAC_MAL_H_
20#define __IBM_EMAC_MAL_H_
3 21
22#include <linux/config.h>
23#include <linux/init.h>
4#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/netdevice.h>
5 26
6#define MAL_DT_ALIGN (4096) /* Alignment for each channel's descriptor table */ 27#include <asm/io.h>
7 28
8#define MAL_CHAN_MASK(chan) (0x80000000 >> (chan)) 29/*
30 * These MAL "versions" probably aren't the real versions IBM uses for these
31 * MAL cores, I assigned them just to make #ifdefs in this file nicer and
32 * reflect the fact that 40x and 44x have slightly different MALs. --ebs
33 */
34#if defined(CONFIG_405GP) || defined(CONFIG_405GPR) || defined(CONFIG_405EP) || \
35 defined(CONFIG_440EP) || defined(CONFIG_NP405H)
36#define MAL_VERSION 1
37#elif defined(CONFIG_440GP) || defined(CONFIG_440GX) || defined(CONFIG_440SP)
38#define MAL_VERSION 2
39#else
40#error "Unknown SoC, please check chip manual and choose MAL 'version'"
41#endif
42
43/* MALx DCR registers */
44#define MAL_CFG 0x00
45#define MAL_CFG_SR 0x80000000
46#define MAL_CFG_PLBB 0x00004000
47#define MAL_CFG_OPBBL 0x00000080
48#define MAL_CFG_EOPIE 0x00000004
49#define MAL_CFG_LEA 0x00000002
50#define MAL_CFG_SD 0x00000001
51#if MAL_VERSION == 1
52#define MAL_CFG_PLBP_MASK 0x00c00000
53#define MAL_CFG_PLBP_10 0x00800000
54#define MAL_CFG_GA 0x00200000
55#define MAL_CFG_OA 0x00100000
56#define MAL_CFG_PLBLE 0x00080000
57#define MAL_CFG_PLBT_MASK 0x00078000
58#define MAL_CFG_DEFAULT (MAL_CFG_PLBP_10 | MAL_CFG_PLBT_MASK)
59#elif MAL_VERSION == 2
60#define MAL_CFG_RPP_MASK 0x00c00000
61#define MAL_CFG_RPP_10 0x00800000
62#define MAL_CFG_RMBS_MASK 0x00300000
63#define MAL_CFG_WPP_MASK 0x000c0000
64#define MAL_CFG_WPP_10 0x00080000
65#define MAL_CFG_WMBS_MASK 0x00030000
66#define MAL_CFG_PLBLE 0x00008000
67#define MAL_CFG_DEFAULT (MAL_CFG_RMBS_MASK | MAL_CFG_WMBS_MASK | \
68 MAL_CFG_RPP_10 | MAL_CFG_WPP_10)
69#else
70#error "Unknown MAL version"
71#endif
72
73#define MAL_ESR 0x01
74#define MAL_ESR_EVB 0x80000000
75#define MAL_ESR_CIDT 0x40000000
76#define MAL_ESR_CID_MASK 0x3e000000
77#define MAL_ESR_CID_SHIFT 25
78#define MAL_ESR_DE 0x00100000
79#define MAL_ESR_OTE 0x00040000
80#define MAL_ESR_OSE 0x00020000
81#define MAL_ESR_PEIN 0x00010000
82#define MAL_ESR_DEI 0x00000010
83#define MAL_ESR_OTEI 0x00000004
84#define MAL_ESR_OSEI 0x00000002
85#define MAL_ESR_PBEI 0x00000001
86#if MAL_VERSION == 1
87#define MAL_ESR_ONE 0x00080000
88#define MAL_ESR_ONEI 0x00000008
89#elif MAL_VERSION == 2
90#define MAL_ESR_PTE 0x00800000
91#define MAL_ESR_PRE 0x00400000
92#define MAL_ESR_PWE 0x00200000
93#define MAL_ESR_PTEI 0x00000080
94#define MAL_ESR_PREI 0x00000040
95#define MAL_ESR_PWEI 0x00000020
96#else
97#error "Unknown MAL version"
98#endif
99
100#define MAL_IER 0x02
101#define MAL_IER_DE 0x00000010
102#define MAL_IER_OTE 0x00000004
103#define MAL_IER_OE 0x00000002
104#define MAL_IER_PE 0x00000001
105#if MAL_VERSION == 1
106#define MAL_IER_NWE 0x00000008
107#define MAL_IER_SOC_EVENTS MAL_IER_NWE
108#elif MAL_VERSION == 2
109#define MAL_IER_PT 0x00000080
110#define MAL_IER_PRE 0x00000040
111#define MAL_IER_PWE 0x00000020
112#define MAL_IER_SOC_EVENTS (MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE)
113#else
114#error "Unknown MAL version"
115#endif
116#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_OTE | \
117 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
118
119#define MAL_TXCASR 0x04
120#define MAL_TXCARR 0x05
121#define MAL_TXEOBISR 0x06
122#define MAL_TXDEIR 0x07
123#define MAL_RXCASR 0x10
124#define MAL_RXCARR 0x11
125#define MAL_RXEOBISR 0x12
126#define MAL_RXDEIR 0x13
127#define MAL_TXCTPR(n) ((n) + 0x20)
128#define MAL_RXCTPR(n) ((n) + 0x40)
129#define MAL_RCBS(n) ((n) + 0x60)
130
131/* In reality MAL can handle TX buffers up to 4095 bytes long,
132 * but this isn't a good round number :) --ebs
133 */
134#define MAL_MAX_TX_SIZE 4080
135#define MAL_MAX_RX_SIZE 4080
136
137static inline int mal_rx_size(int len)
138{
139 len = (len + 0xf) & ~0xf;
140 return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len;
141}
142
143static inline int mal_tx_chunks(int len)
144{
145 return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE;
146}
147
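
mal_rx_size() rounds a length up to MAL's 16-byte RX granularity and caps it at 4080; mal_tx_chunks() is the matching ceiling division giving how many TX descriptors a frame needs. A worked example for a jumbo frame:

    #include <stdio.h>

    #define MAL_MAX_TX_SIZE 4080
    #define MAL_MAX_RX_SIZE 4080

    int main(void)
    {
            int len = 9018;         /* 9000-byte MTU plus 18 bytes of L2 header */
            int rx = (len + 0xf) & ~0xf;

            printf("rx bd size: %d\n",
                   rx > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : rx);     /* 4080 */
            printf("tx chunks:  %d\n",
                   (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE);   /* 3 */
            return 0;
    }
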
148#define MAL_CHAN_MASK(n) (0x80000000 >> (n))
9 149
10/* MAL Buffer Descriptor structure */ 150/* MAL Buffer Descriptor structure */
11struct mal_descriptor { 151struct mal_descriptor {
12 unsigned short ctrl; /* MAL / Commac status control bits */ 152 u16 ctrl; /* MAL / Commac status control bits */
13 short data_len; /* Max length is 4K-1 (12 bits) */ 153 u16 data_len; /* Max length is 4K-1 (12 bits) */
14 unsigned char *data_ptr; /* pointer to actual data buffer */ 154 u32 data_ptr; /* pointer to actual data buffer */
15} __attribute__ ((packed)); 155};
16 156
17/* the following defines are for the MadMAL status and control registers. */ 157/* the following defines are for the MadMAL status and control registers. */
18/* MADMAL transmit and receive status/control bits */ 158/* MADMAL transmit and receive status/control bits */
19#define MAL_RX_CTRL_EMPTY 0x8000 159#define MAL_RX_CTRL_EMPTY 0x8000
20#define MAL_RX_CTRL_WRAP 0x4000 160#define MAL_RX_CTRL_WRAP 0x4000
21#define MAL_RX_CTRL_CM 0x2000 161#define MAL_RX_CTRL_CM 0x2000
22#define MAL_RX_CTRL_LAST 0x1000 162#define MAL_RX_CTRL_LAST 0x1000
23#define MAL_RX_CTRL_FIRST 0x0800 163#define MAL_RX_CTRL_FIRST 0x0800
24#define MAL_RX_CTRL_INTR 0x0400 164#define MAL_RX_CTRL_INTR 0x0400
25 165#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST)
26#define MAL_TX_CTRL_READY 0x8000 166#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE)
27#define MAL_TX_CTRL_WRAP 0x4000 167
28#define MAL_TX_CTRL_CM 0x2000 168#define MAL_TX_CTRL_READY 0x8000
29#define MAL_TX_CTRL_LAST 0x1000 169#define MAL_TX_CTRL_WRAP 0x4000
30#define MAL_TX_CTRL_INTR 0x0400 170#define MAL_TX_CTRL_CM 0x2000
171#define MAL_TX_CTRL_LAST 0x1000
172#define MAL_TX_CTRL_INTR 0x0400
31 173
32struct mal_commac_ops { 174struct mal_commac_ops {
33 void (*txeob) (void *dev, u32 chanmask); 175 void (*poll_tx) (void *dev);
34 void (*txde) (void *dev, u32 chanmask); 176 int (*poll_rx) (void *dev, int budget);
35 void (*rxeob) (void *dev, u32 chanmask); 177 int (*peek_rx) (void *dev);
36 void (*rxde) (void *dev, u32 chanmask); 178 void (*rxde) (void *dev);
37}; 179};
38 180
39struct mal_commac { 181struct mal_commac {
40 struct mal_commac_ops *ops; 182 struct mal_commac_ops *ops;
41 void *dev; 183 void *dev;
42 u32 tx_chan_mask, rx_chan_mask; 184 struct list_head poll_list;
43 struct list_head list; 185 int rx_stopped;
186
187 u32 tx_chan_mask;
188 u32 rx_chan_mask;
189 struct list_head list;
44}; 190};
45 191
46struct ibm_ocp_mal { 192struct ibm_ocp_mal {
47 int dcrbase; 193 int dcrbase;
48 194
49 struct list_head commac; 195 struct list_head poll_list;
50 u32 tx_chan_mask, rx_chan_mask; 196 struct net_device poll_dev;
51 197
52 dma_addr_t tx_phys_addr; 198 struct list_head list;
53 struct mal_descriptor *tx_virt_addr; 199 u32 tx_chan_mask;
200 u32 rx_chan_mask;
54 201
55 dma_addr_t rx_phys_addr; 202 dma_addr_t bd_dma;
56 struct mal_descriptor *rx_virt_addr; 203 struct mal_descriptor *bd_virt;
57};
58 204
59#define GET_MAL_STANZA(base,dcrn) \ 205 struct ocp_def *def;
60 case base: \ 206};
61 x = mfdcr(dcrn(base)); \
62 break;
63
64#define SET_MAL_STANZA(base,dcrn, val) \
65 case base: \
66 mtdcr(dcrn(base), (val)); \
67 break;
68
69#define GET_MAL0_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL_BASE,dcrn)
70#define SET_MAL0_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL_BASE,dcrn,val)
71
72#ifdef DCRN_MAL1_BASE
73#define GET_MAL1_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL1_BASE,dcrn)
74#define SET_MAL1_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL1_BASE,dcrn,val)
75#else /* ! DCRN_MAL1_BASE */
76#define GET_MAL1_STANZA(dcrn)
77#define SET_MAL1_STANZA(dcrn,val)
78#endif
79 207
80#define get_mal_dcrn(mal, dcrn) ({ \ 208static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg)
81 u32 x; \
82 switch ((mal)->dcrbase) { \
83 GET_MAL0_STANZA(dcrn) \
84 GET_MAL1_STANZA(dcrn) \
85 default: \
86 x = 0; \
87 BUG(); \
88 } \
89x; })
90
91#define set_mal_dcrn(mal, dcrn, val) do { \
92 switch ((mal)->dcrbase) { \
93 SET_MAL0_STANZA(dcrn,val) \
94 SET_MAL1_STANZA(dcrn,val) \
95 default: \
96 BUG(); \
97 } } while (0)
98
99static inline void mal_enable_tx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
100{ 209{
101 set_mal_dcrn(mal, DCRN_MALTXCASR, 210 return mfdcr(mal->dcrbase + reg);
102 get_mal_dcrn(mal, DCRN_MALTXCASR) | chanmask);
103} 211}
104 212
105static inline void mal_disable_tx_channels(struct ibm_ocp_mal *mal, 213static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val)
106 u32 chanmask)
107{ 214{
108 set_mal_dcrn(mal, DCRN_MALTXCARR, chanmask); 215 mtdcr(mal->dcrbase + reg, val);
109} 216}
110 217
111static inline void mal_enable_rx_channels(struct ibm_ocp_mal *mal, u32 chanmask) 218/* Register MAL devices */
112{ 219int mal_init(void) __init;
113 set_mal_dcrn(mal, DCRN_MALRXCASR, 220void mal_exit(void) __exit;
114 get_mal_dcrn(mal, DCRN_MALRXCASR) | chanmask);
115}
116 221
117static inline void mal_disable_rx_channels(struct ibm_ocp_mal *mal, 222int mal_register_commac(struct ibm_ocp_mal *mal,
118 u32 chanmask) 223 struct mal_commac *commac) __init;
119{ 224void mal_unregister_commac(struct ibm_ocp_mal *mal,
120 set_mal_dcrn(mal, DCRN_MALRXCARR, chanmask); 225 struct mal_commac *commac) __exit;
121} 226int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size);
227
228/* Returns BD ring offset for a particular channel
229 (in 'struct mal_descriptor' elements)
230*/
231int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel);
232int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel);
233
234void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel);
235void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel);
236void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel);
237void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel);
122 238
123extern int mal_register_commac(struct ibm_ocp_mal *mal, 239/* Add/remove EMAC to/from MAL polling list */
124 struct mal_commac *commac); 240void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac);
125extern int mal_unregister_commac(struct ibm_ocp_mal *mal, 241void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac);
126 struct mal_commac *commac); 242
243/* Ethtool MAL registers */
244struct ibm_mal_regs {
245 u32 tx_count;
246 u32 rx_count;
247
248 u32 cfg;
249 u32 esr;
250 u32 ier;
251 u32 tx_casr;
252 u32 tx_carr;
253 u32 tx_eobisr;
254 u32 tx_deir;
255 u32 rx_casr;
256 u32 rx_carr;
257 u32 rx_eobisr;
258 u32 rx_deir;
259 u32 tx_ctpr[32];
260 u32 rx_ctpr[32];
261 u32 rcbs[32];
262};
127 263
128extern int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, 264int mal_get_regs_len(struct ibm_ocp_mal *mal);
129 unsigned long size); 265void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf);
130 266
131#endif /* _IBM_EMAC_MAL_H */ 267#endif /* __IBM_EMAC_MAL_H_ */
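
For context, a plausible sketch of how the per-channel helpers declared
above sit on top of the flat DCR accessors; the real bodies live in
ibm_emac_mal.c, so treat this as illustrative only:

	void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel)
	{
		/* Set the channel's bit in the TX Channel Active Set Register */
		set_mal_dcrn(mal, MAL_TXCASR,
			     get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
	}

	void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel)
	{
		/* Writing the bit to the Active Reset Register clears it */
		set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
	}

This mirrors the mal_{en,dis}able_*_channels() inlines removed above, with
the DCR offset now computed from mal->dcrbase instead of the stanza macros.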
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c
index 14213f090e91..a27e49cfe43b 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.c
+++ b/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -1,96 +1,80 @@
1/* 1/*
2 * ibm_ocp_phy.c 2 * drivers/net/ibm_emac/ibm_emac_phy.c
3 * 3 *
4 * PHY drivers for the ibm ocp ethernet driver. Borrowed 4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
5 * from sungem_phy.c, though I only kept the generic MII 5 * Borrowed from sungem_phy.c, though I only kept the generic MII
6 * driver for now. 6 * driver for now.
7 * 7 *
8 * This file should be shared with other drivers or eventually 8 * This file should be shared with other drivers or eventually
9 * merged as the "low level" part of miilib 9 * merged as the "low level" part of miilib
10 * 10 *
11 * (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org) 11 * (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org)
12 * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net>
12 * 13 *
13 */ 14 */
14
15#include <linux/config.h> 15#include <linux/config.h>
16
17#include <linux/module.h> 16#include <linux/module.h>
18
19#include <linux/kernel.h> 17#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/types.h> 18#include <linux/types.h>
22#include <linux/netdevice.h> 19#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/mii.h> 20#include <linux/mii.h>
25#include <linux/ethtool.h> 21#include <linux/ethtool.h>
26#include <linux/delay.h> 22#include <linux/delay.h>
27 23
24#include <asm/ocp.h>
25
28#include "ibm_emac_phy.h" 26#include "ibm_emac_phy.h"
29 27
30static int reset_one_mii_phy(struct mii_phy *phy, int phy_id) 28static inline int phy_read(struct mii_phy *phy, int reg)
29{
30 return phy->mdio_read(phy->dev, phy->address, reg);
31}
32
33static inline void phy_write(struct mii_phy *phy, int reg, int val)
31{ 34{
32 u16 val; 35 phy->mdio_write(phy->dev, phy->address, reg, val);
36}
37
38int mii_reset_phy(struct mii_phy *phy)
39{
40 int val;
33 int limit = 10000; 41 int limit = 10000;
34 42
35 val = __phy_read(phy, phy_id, MII_BMCR); 43 val = phy_read(phy, MII_BMCR);
36 val &= ~BMCR_ISOLATE; 44 val &= ~BMCR_ISOLATE;
37 val |= BMCR_RESET; 45 val |= BMCR_RESET;
38 __phy_write(phy, phy_id, MII_BMCR, val); 46 phy_write(phy, MII_BMCR, val);
39 47
40 udelay(100); 48 udelay(300);
41 49
42 while (limit--) { 50 while (limit--) {
43 val = __phy_read(phy, phy_id, MII_BMCR); 51 val = phy_read(phy, MII_BMCR);
44 if ((val & BMCR_RESET) == 0) 52 if (val >= 0 && (val & BMCR_RESET) == 0)
45 break; 53 break;
46 udelay(10); 54 udelay(10);
47 } 55 }
48 if ((val & BMCR_ISOLATE) && limit > 0) 56 if ((val & BMCR_ISOLATE) && limit > 0)
49 __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); 57 phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
50
51 return (limit <= 0);
52}
53
54static int cis8201_init(struct mii_phy *phy)
55{
56 u16 epcr;
57
58 epcr = phy_read(phy, MII_CIS8201_EPCR);
59 epcr &= ~EPCR_MODE_MASK;
60
61 switch (phy->mode) {
62 case PHY_MODE_TBI:
63 epcr |= EPCR_TBI_MODE;
64 break;
65 case PHY_MODE_RTBI:
66 epcr |= EPCR_RTBI_MODE;
67 break;
68 case PHY_MODE_GMII:
69 epcr |= EPCR_GMII_MODE;
70 break;
71 case PHY_MODE_RGMII:
72 default:
73 epcr |= EPCR_RGMII_MODE;
74 }
75 58
76 phy_write(phy, MII_CIS8201_EPCR, epcr); 59 return limit <= 0;
77
78 return 0;
79} 60}
80 61
81static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) 62static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
82{ 63{
83 u16 ctl, adv; 64 int ctl, adv;
84 65
85 phy->autoneg = 1; 66 phy->autoneg = AUTONEG_ENABLE;
86 phy->speed = SPEED_10; 67 phy->speed = SPEED_10;
87 phy->duplex = DUPLEX_HALF; 68 phy->duplex = DUPLEX_HALF;
88 phy->pause = 0; 69 phy->pause = phy->asym_pause = 0;
89 phy->advertising = advertise; 70 phy->advertising = advertise;
90 71
91 /* Setup standard advertise */ 72 /* Setup standard advertise */
92 adv = phy_read(phy, MII_ADVERTISE); 73 adv = phy_read(phy, MII_ADVERTISE);
93 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); 74 if (adv < 0)
75 return adv;
76 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
77 ADVERTISE_PAUSE_ASYM);
94 if (advertise & ADVERTISED_10baseT_Half) 78 if (advertise & ADVERTISED_10baseT_Half)
95 adv |= ADVERTISE_10HALF; 79 adv |= ADVERTISE_10HALF;
96 if (advertise & ADVERTISED_10baseT_Full) 80 if (advertise & ADVERTISED_10baseT_Full)
@@ -99,8 +83,25 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
99 adv |= ADVERTISE_100HALF; 83 adv |= ADVERTISE_100HALF;
100 if (advertise & ADVERTISED_100baseT_Full) 84 if (advertise & ADVERTISED_100baseT_Full)
101 adv |= ADVERTISE_100FULL; 85 adv |= ADVERTISE_100FULL;
86 if (advertise & ADVERTISED_Pause)
87 adv |= ADVERTISE_PAUSE_CAP;
88 if (advertise & ADVERTISED_Asym_Pause)
89 adv |= ADVERTISE_PAUSE_ASYM;
102 phy_write(phy, MII_ADVERTISE, adv); 90 phy_write(phy, MII_ADVERTISE, adv);
103 91
92 if (phy->features &
93 (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
94 adv = phy_read(phy, MII_CTRL1000);
95 if (adv < 0)
96 return adv;
97 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
98 if (advertise & ADVERTISED_1000baseT_Full)
99 adv |= ADVERTISE_1000FULL;
100 if (advertise & ADVERTISED_1000baseT_Half)
101 adv |= ADVERTISE_1000HALF;
102 phy_write(phy, MII_CTRL1000, adv);
103 }
104
104 /* Start/Restart aneg */ 105 /* Start/Restart aneg */
105 ctl = phy_read(phy, MII_BMCR); 106 ctl = phy_read(phy, MII_BMCR);
106 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); 107 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
@@ -111,14 +112,16 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
111 112
112static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) 113static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
113{ 114{
114 u16 ctl; 115 int ctl;
115 116
116 phy->autoneg = 0; 117 phy->autoneg = AUTONEG_DISABLE;
117 phy->speed = speed; 118 phy->speed = speed;
118 phy->duplex = fd; 119 phy->duplex = fd;
119 phy->pause = 0; 120 phy->pause = phy->asym_pause = 0;
120 121
121 ctl = phy_read(phy, MII_BMCR); 122 ctl = phy_read(phy, MII_BMCR);
123 if (ctl < 0)
124 return ctl;
122 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE); 125 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
123 126
124 /* First reset the PHY */ 127 /* First reset the PHY */
@@ -132,6 +135,8 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
132 ctl |= BMCR_SPEED100; 135 ctl |= BMCR_SPEED100;
133 break; 136 break;
134 case SPEED_1000: 137 case SPEED_1000:
138 ctl |= BMCR_SPEED1000;
139 break;
135 default: 140 default:
136 return -EINVAL; 141 return -EINVAL;
137 } 142 }
@@ -144,112 +149,143 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
144 149
145static int genmii_poll_link(struct mii_phy *phy) 150static int genmii_poll_link(struct mii_phy *phy)
146{ 151{
147 u16 status; 152 int status;
148 153
149 (void)phy_read(phy, MII_BMSR); 154 /* Clear latched value with dummy read */
155 phy_read(phy, MII_BMSR);
150 status = phy_read(phy, MII_BMSR); 156 status = phy_read(phy, MII_BMSR);
151 if ((status & BMSR_LSTATUS) == 0) 157 if (status < 0 || (status & BMSR_LSTATUS) == 0)
152 return 0; 158 return 0;
153 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) 159 if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE))
154 return 0; 160 return 0;
155 return 1; 161 return 1;
156} 162}
157 163
158#define MII_CIS8201_ACSR 0x1c 164static int genmii_read_link(struct mii_phy *phy)
159#define ACSR_DUPLEX_STATUS 0x0020
160#define ACSR_SPEED_1000BASET 0x0010
161#define ACSR_SPEED_100BASET 0x0008
162
163static int cis8201_read_link(struct mii_phy *phy)
164{ 165{
165 u16 acsr; 166 if (phy->autoneg == AUTONEG_ENABLE) {
167 int glpa = 0;
168 int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE);
169 if (lpa < 0)
170 return lpa;
171
172 if (phy->features &
173 (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
174 int adv = phy_read(phy, MII_CTRL1000);
175 glpa = phy_read(phy, MII_STAT1000);
176
177 if (glpa < 0 || adv < 0)
178 return glpa < 0 ? glpa : adv;
179
180 glpa &= adv << 2;
181 }
182
183 phy->speed = SPEED_10;
184 phy->duplex = DUPLEX_HALF;
185 phy->pause = phy->asym_pause = 0;
186
187 if (glpa & (LPA_1000FULL | LPA_1000HALF)) {
188 phy->speed = SPEED_1000;
189 if (glpa & LPA_1000FULL)
190 phy->duplex = DUPLEX_FULL;
191 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
192 phy->speed = SPEED_100;
193 if (lpa & LPA_100FULL)
194 phy->duplex = DUPLEX_FULL;
195 } else if (lpa & LPA_10FULL)
196 phy->duplex = DUPLEX_FULL;
166 197
167 if (phy->autoneg) { 198 if (phy->duplex == DUPLEX_FULL) {
168 acsr = phy_read(phy, MII_CIS8201_ACSR); 199 phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
200 phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
201 }
202 } else {
203 int bmcr = phy_read(phy, MII_BMCR);
204 if (bmcr < 0)
205 return bmcr;
169 206
170 if (acsr & ACSR_DUPLEX_STATUS) 207 if (bmcr & BMCR_FULLDPLX)
171 phy->duplex = DUPLEX_FULL; 208 phy->duplex = DUPLEX_FULL;
172 else 209 else
173 phy->duplex = DUPLEX_HALF; 210 phy->duplex = DUPLEX_HALF;
174 if (acsr & ACSR_SPEED_1000BASET) { 211 if (bmcr & BMCR_SPEED1000)
175 phy->speed = SPEED_1000; 212 phy->speed = SPEED_1000;
176 } else if (acsr & ACSR_SPEED_100BASET) 213 else if (bmcr & BMCR_SPEED100)
177 phy->speed = SPEED_100; 214 phy->speed = SPEED_100;
178 else 215 else
179 phy->speed = SPEED_10; 216 phy->speed = SPEED_10;
180 phy->pause = 0;
181 }
182 /* On non-aneg, we assume what we put in BMCR is the speed,
183 * though magic-aneg shouldn't prevent this case from occurring
184 */
185 217
218 phy->pause = phy->asym_pause = 0;
219 }
186 return 0; 220 return 0;
187} 221}
188 222
189static int genmii_read_link(struct mii_phy *phy) 223/* Generic implementation for most 10/100/1000 PHYs */
224static struct mii_phy_ops generic_phy_ops = {
225 .setup_aneg = genmii_setup_aneg,
226 .setup_forced = genmii_setup_forced,
227 .poll_link = genmii_poll_link,
228 .read_link = genmii_read_link
229};
230
231static struct mii_phy_def genmii_phy_def = {
232 .phy_id = 0x00000000,
233 .phy_id_mask = 0x00000000,
234 .name = "Generic MII",
235 .ops = &generic_phy_ops
236};
237
238/* CIS8201 */
239#define MII_CIS8201_EPCR 0x17
240#define EPCR_MODE_MASK 0x3000
241#define EPCR_GMII_MODE 0x0000
242#define EPCR_RGMII_MODE 0x1000
243#define EPCR_TBI_MODE 0x2000
244#define EPCR_RTBI_MODE 0x3000
245
246static int cis8201_init(struct mii_phy *phy)
190{ 247{
191 u16 lpa; 248 int epcr;
192 249
193 if (phy->autoneg) { 250 epcr = phy_read(phy, MII_CIS8201_EPCR);
194 lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE); 251 if (epcr < 0)
252 return epcr;
195 253
196 phy->speed = SPEED_10; 254 epcr &= ~EPCR_MODE_MASK;
197 phy->duplex = DUPLEX_HALF;
198 phy->pause = 0;
199 255
200 if (lpa & (LPA_100FULL | LPA_100HALF)) { 256 switch (phy->mode) {
201 phy->speed = SPEED_100; 257 case PHY_MODE_TBI:
202 if (lpa & LPA_100FULL) 258 epcr |= EPCR_TBI_MODE;
203 phy->duplex = DUPLEX_FULL; 259 break;
204 } else if (lpa & LPA_10FULL) 260 case PHY_MODE_RTBI:
205 phy->duplex = DUPLEX_FULL; 261 epcr |= EPCR_RTBI_MODE;
262 break;
263 case PHY_MODE_GMII:
264 epcr |= EPCR_GMII_MODE;
265 break;
266 case PHY_MODE_RGMII:
267 default:
268 epcr |= EPCR_RGMII_MODE;
206 } 269 }
207 /* On non-aneg, we assume what we put in BMCR is the speed, 270
208 * though magic-aneg shouldn't prevent this case from occurring 271 phy_write(phy, MII_CIS8201_EPCR, epcr);
209 */
210 272
211 return 0; 273 return 0;
212} 274}
213 275
214#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
215 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
216 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII)
217#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
218 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
219
220/* CIS8201 phy ops */
221static struct mii_phy_ops cis8201_phy_ops = { 276static struct mii_phy_ops cis8201_phy_ops = {
222 init:cis8201_init, 277 .init = cis8201_init,
223 setup_aneg:genmii_setup_aneg, 278 .setup_aneg = genmii_setup_aneg,
224 setup_forced:genmii_setup_forced, 279 .setup_forced = genmii_setup_forced,
225 poll_link:genmii_poll_link, 280 .poll_link = genmii_poll_link,
226 read_link:cis8201_read_link 281 .read_link = genmii_read_link
227};
228
229/* Generic implementation for most 10/100 PHYs */
230static struct mii_phy_ops generic_phy_ops = {
231 setup_aneg:genmii_setup_aneg,
232 setup_forced:genmii_setup_forced,
233 poll_link:genmii_poll_link,
234 read_link:genmii_read_link
235}; 282};
236 283
237static struct mii_phy_def cis8201_phy_def = { 284static struct mii_phy_def cis8201_phy_def = {
238 phy_id:0x000fc410, 285 .phy_id = 0x000fc410,
239 phy_id_mask:0x000ffff0, 286 .phy_id_mask = 0x000ffff0,
240 name:"CIS8201 Gigabit Ethernet", 287 .name = "CIS8201 Gigabit Ethernet",
241 features:MII_GBIT_FEATURES, 288 .ops = &cis8201_phy_ops
242 magic_aneg:0,
243 ops:&cis8201_phy_ops
244};
245
246static struct mii_phy_def genmii_phy_def = {
247 phy_id:0x00000000,
248 phy_id_mask:0x00000000,
249 name:"Generic MII",
250 features:MII_BASIC_FEATURES,
251 magic_aneg:0,
252 ops:&generic_phy_ops
253}; 289};
254 290
255static struct mii_phy_def *mii_phy_table[] = { 291static struct mii_phy_def *mii_phy_table[] = {
@@ -258,39 +294,60 @@ static struct mii_phy_def *mii_phy_table[] = {
258 NULL 294 NULL
259}; 295};
260 296
261int mii_phy_probe(struct mii_phy *phy, int mii_id) 297int mii_phy_probe(struct mii_phy *phy, int address)
262{ 298{
263 int rc;
264 u32 id;
265 struct mii_phy_def *def; 299 struct mii_phy_def *def;
266 int i; 300 int i;
301 u32 id;
267 302
268 phy->autoneg = 0; 303 phy->autoneg = AUTONEG_DISABLE;
269 phy->advertising = 0; 304 phy->advertising = 0;
270 phy->mii_id = mii_id; 305 phy->address = address;
271 phy->speed = 0; 306 phy->speed = SPEED_10;
272 phy->duplex = 0; 307 phy->duplex = DUPLEX_HALF;
273 phy->pause = 0; 308 phy->pause = phy->asym_pause = 0;
274 309
275 /* Take PHY out of isloate mode and reset it. */ 310 /* Take PHY out of isolate mode and reset it. */
276 rc = reset_one_mii_phy(phy, mii_id); 311 if (mii_reset_phy(phy))
277 if (rc)
278 return -ENODEV; 312 return -ENODEV;
279 313
280 /* Read ID and find matching entry */ 314 /* Read ID and find matching entry */
281 id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)) 315 id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
282 & 0xfffffff0;
283 for (i = 0; (def = mii_phy_table[i]) != NULL; i++) 316 for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
284 if ((id & def->phy_id_mask) == def->phy_id) 317 if ((id & def->phy_id_mask) == def->phy_id)
285 break; 318 break;
286 /* Should never be NULL (we have a generic entry), but... */ 319 /* Should never be NULL (we have a generic entry), but... */
287 if (def == NULL) 320 if (!def)
288 return -ENODEV; 321 return -ENODEV;
289 322
290 phy->def = def; 323 phy->def = def;
291 324
325 /* Determine PHY features if needed */
326 phy->features = def->features;
327 if (!phy->features) {
328 u16 bmsr = phy_read(phy, MII_BMSR);
329 if (bmsr & BMSR_ANEGCAPABLE)
330 phy->features |= SUPPORTED_Autoneg;
331 if (bmsr & BMSR_10HALF)
332 phy->features |= SUPPORTED_10baseT_Half;
333 if (bmsr & BMSR_10FULL)
334 phy->features |= SUPPORTED_10baseT_Full;
335 if (bmsr & BMSR_100HALF)
336 phy->features |= SUPPORTED_100baseT_Half;
337 if (bmsr & BMSR_100FULL)
338 phy->features |= SUPPORTED_100baseT_Full;
339 if (bmsr & BMSR_ESTATEN) {
340 u16 esr = phy_read(phy, MII_ESTATUS);
341 if (esr & ESTATUS_1000_TFULL)
342 phy->features |= SUPPORTED_1000baseT_Full;
343 if (esr & ESTATUS_1000_THALF)
344 phy->features |= SUPPORTED_1000baseT_Half;
345 }
346 phy->features |= SUPPORTED_MII;
347 }
348
292 /* Setup default advertising */ 349 /* Setup default advertising */
293 phy->advertising = def->features; 350 phy->advertising = phy->features;
294 351
295 return 0; 352 return 0;
296} 353}
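
A usage sketch for the probe API (emac_mdio_read/emac_mdio_write are
hypothetical stand-ins for the MAC driver's real MDIO accessors):

	static int example_phy_attach(struct net_device *ndev,
				      struct mii_phy *phy, int addr, int mode)
	{
		/* Caller must fill dev, mdio_read and mdio_write before probing */
		phy->dev = ndev;
		phy->mdio_read = emac_mdio_read;
		phy->mdio_write = emac_mdio_write;
		phy->mode = mode;

		if (mii_phy_probe(phy, addr))
			return -ENODEV;		/* no PHY, or reset timed out */

		/* Advertise everything the PHY turned out to support */
		return phy->def->ops->setup_aneg(phy, phy->advertising);
	}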
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.h b/drivers/net/ibm_emac/ibm_emac_phy.h
index 61afbea96563..a70e0fea54c4 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.h
+++ b/drivers/net/ibm_emac/ibm_emac_phy.h
@@ -1,65 +1,25 @@
1
2/* 1/*
3 * ibm_emac_phy.h 2 * drivers/net/ibm_emac/ibm_emac_phy.h
4 *
5 * 3 *
6 * Benjamin Herrenschmidt <benh@kernel.crashing.org> 4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support
7 * February 2003
8 * 5 *
9 * This program is free software; you can redistribute it and/or modify it 6 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 * under the terms of the GNU General Public License as published by the 7 * February 2003
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 * 8 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 9 * Minor additions by Eugene Surovegin <ebs@ebshome.net>, 2004
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
20 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA.
28 * 10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
29 * 15 *
30 * This file basically duplicates sungem_phy.{c,h} with different PHYs 16 * This file basically duplicates sungem_phy.{c,h} with different PHYs
31 * supported. I'm looking into merging that in a single mii layer more 17 * supported. I'm looking into merging that in a single mii layer more
32 * flexible than mii.c 18 * flexible than mii.c
33 */ 19 */
34 20
35#ifndef _IBM_EMAC_PHY_H_ 21#ifndef _IBM_OCP_PHY_H_
36#define _IBM_EMAC_PHY_H_ 22#define _IBM_OCP_PHY_H_
37
38/*
39 * PHY mode settings
40 * Used for multi-mode capable PHYs
41 */
42#define PHY_MODE_NA 0
43#define PHY_MODE_MII 1
44#define PHY_MODE_RMII 2
45#define PHY_MODE_SMII 3
46#define PHY_MODE_RGMII 4
47#define PHY_MODE_TBI 5
48#define PHY_MODE_GMII 6
49#define PHY_MODE_RTBI 7
50#define PHY_MODE_SGMII 8
51
52/*
53 * PHY specific registers/values
54 */
55
56/* CIS8201 */
57#define MII_CIS8201_EPCR 0x17
58#define EPCR_MODE_MASK 0x3000
59#define EPCR_GMII_MODE 0x0000
60#define EPCR_RGMII_MODE 0x1000
61#define EPCR_TBI_MODE 0x2000
62#define EPCR_RTBI_MODE 0x3000
63 23
64struct mii_phy; 24struct mii_phy;
65 25
@@ -77,7 +37,8 @@ struct mii_phy_ops {
77struct mii_phy_def { 37struct mii_phy_def {
78 u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ 38 u32 phy_id; /* Concatenated ID1 << 16 | ID2 */
79 u32 phy_id_mask; /* Significant bits */ 39 u32 phy_id_mask; /* Significant bits */
80 u32 features; /* Ethtool SUPPORTED_* defines */ 40 u32 features; /* Ethtool SUPPORTED_* defines or
41 0 for autodetect */
81 int magic_aneg; /* Autoneg does all speed test for us */ 42 int magic_aneg; /* Autoneg does all speed test for us */
82 const char *name; 43 const char *name;
83 const struct mii_phy_ops *ops; 44 const struct mii_phy_ops *ops;
@@ -86,8 +47,11 @@ struct mii_phy_def {
86/* An instance of a PHY, partially borrowed from mii_if_info */ 47/* An instance of a PHY, partially borrowed from mii_if_info */
87struct mii_phy { 48struct mii_phy {
88 struct mii_phy_def *def; 49 struct mii_phy_def *def;
89 int advertising; 50 u32 advertising; /* Ethtool ADVERTISED_* defines */
90 int mii_id; 51 u32 features; /* Copied from mii_phy_def.features
52 or determined automaticaly */
53 int address; /* PHY address */
54 int mode; /* PHY mode */
91 55
92 /* 1: autoneg enabled, 0: disabled */ 56 /* 1: autoneg enabled, 0: disabled */
93 int autoneg; 57 int autoneg;
@@ -98,40 +62,19 @@ struct mii_phy {
98 int speed; 62 int speed;
99 int duplex; 63 int duplex;
100 int pause; 64 int pause;
101 65 int asym_pause;
102 /* PHY mode - if needed */
103 int mode;
104 66
105 /* Provided by host chip */ 67 /* Provided by host chip */
106 struct net_device *dev; 68 struct net_device *dev;
107 int (*mdio_read) (struct net_device * dev, int mii_id, int reg); 69 int (*mdio_read) (struct net_device * dev, int addr, int reg);
108 void (*mdio_write) (struct net_device * dev, int mii_id, int reg, 70 void (*mdio_write) (struct net_device * dev, int addr, int reg,
109 int val); 71 int val);
110}; 72};
111 73
112/* Pass in a struct mii_phy with dev, mdio_read and mdio_write 74/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
113 * filled, the remaining fields will be filled on return 75 * filled, the remaining fields will be filled on return
114 */ 76 */
115extern int mii_phy_probe(struct mii_phy *phy, int mii_id); 77int mii_phy_probe(struct mii_phy *phy, int address);
116 78int mii_reset_phy(struct mii_phy *phy);
117static inline int __phy_read(struct mii_phy *phy, int id, int reg)
118{
119 return phy->mdio_read(phy->dev, id, reg);
120}
121
122static inline void __phy_write(struct mii_phy *phy, int id, int reg, int val)
123{
124 phy->mdio_write(phy->dev, id, reg, val);
125}
126
127static inline int phy_read(struct mii_phy *phy, int reg)
128{
129 return phy->mdio_read(phy->dev, phy->mii_id, reg);
130}
131
132static inline void phy_write(struct mii_phy *phy, int reg, int val)
133{
134 phy->mdio_write(phy->dev, phy->mii_id, reg, val);
135}
136 79
137#endif /* _IBM_EMAC_PHY_H_ */ 80#endif /* _IBM_OCP_PHY_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.c b/drivers/net/ibm_emac/ibm_emac_rgmii.c
new file mode 100644
index 000000000000..f0b1ffb2dbbf
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.c
@@ -0,0 +1,201 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_rgmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * Copyright 2004 MontaVista Software, Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/ethtool.h>
22#include <asm/io.h>
23
24#include "ibm_emac_core.h"
25#include "ibm_emac_debug.h"
26
27/* RGMIIx_FER */
28#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4))
29#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4))
30#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4))
31#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4))
32#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4))
33
34/* RGMIIx_SSR */
35#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
36#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
37#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
38
39/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
40static inline int rgmii_valid_mode(int phy_mode)
41{
42 return phy_mode == PHY_MODE_GMII ||
43 phy_mode == PHY_MODE_RGMII ||
44 phy_mode == PHY_MODE_TBI ||
45 phy_mode == PHY_MODE_RTBI;
46}
47
48static inline const char *rgmii_mode_name(int mode)
49{
50 switch (mode) {
51 case PHY_MODE_RGMII:
52 return "RGMII";
53 case PHY_MODE_TBI:
54 return "TBI";
55 case PHY_MODE_GMII:
56 return "GMII";
57 case PHY_MODE_RTBI:
58 return "RTBI";
59 default:
60 BUG();
61 }
62}
63
64static inline u32 rgmii_mode_mask(int mode, int input)
65{
66 switch (mode) {
67 case PHY_MODE_RGMII:
68 return RGMII_FER_RGMII(input);
69 case PHY_MODE_TBI:
70 return RGMII_FER_TBI(input);
71 case PHY_MODE_GMII:
72 return RGMII_FER_GMII(input);
73 case PHY_MODE_RTBI:
74 return RGMII_FER_RTBI(input);
75 default:
76 BUG();
77 }
78}
79
80static int __init rgmii_init(struct ocp_device *ocpdev, int input, int mode)
81{
82 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
83 struct rgmii_regs *p;
84
85 RGMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, mode);
86
87 if (!dev) {
88 dev = kzalloc(sizeof(struct ibm_ocp_rgmii), GFP_KERNEL);
89 if (!dev) {
90 printk(KERN_ERR
91 "rgmii%d: couldn't allocate device structure!\n",
92 ocpdev->def->index);
93 return -ENOMEM;
94 }
95
96 p = (struct rgmii_regs *)ioremap(ocpdev->def->paddr,
97 sizeof(struct rgmii_regs));
98 if (!p) {
99 printk(KERN_ERR
100 "rgmii%d: could not ioremap device registers!\n",
101 ocpdev->def->index);
102 kfree(dev);
103 return -ENOMEM;
104 }
105
106 dev->base = p;
107 ocp_set_drvdata(ocpdev, dev);
108
109 /* Disable all inputs by default */
110 out_be32(&p->fer, 0);
111 } else
112 p = dev->base;
113
114 /* Enable this input */
115 out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
116
117 printk(KERN_NOTICE "rgmii%d: input %d in %s mode\n",
118 ocpdev->def->index, input, rgmii_mode_name(mode));
119
120 ++dev->users;
121 return 0;
122}
123
124int __init rgmii_attach(void *emac)
125{
126 struct ocp_enet_private *dev = emac;
127 struct ocp_func_emac_data *emacdata = dev->def->additions;
128
129 /* Check if we need to attach to a RGMII */
130 if (emacdata->rgmii_idx >= 0 && rgmii_valid_mode(emacdata->phy_mode)) {
131 dev->rgmii_input = emacdata->rgmii_mux;
132 dev->rgmii_dev =
133 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_RGMII,
134 emacdata->rgmii_idx);
135 if (!dev->rgmii_dev) {
136 printk(KERN_ERR "emac%d: unknown rgmii%d!\n",
137 dev->def->index, emacdata->rgmii_idx);
138 return -ENODEV;
139 }
140 if (rgmii_init
141 (dev->rgmii_dev, dev->rgmii_input, emacdata->phy_mode)) {
142 printk(KERN_ERR
143 "emac%d: rgmii%d initialization failed!\n",
144 dev->def->index, emacdata->rgmii_idx);
145 return -ENODEV;
146 }
147 }
148 return 0;
149}
150
151void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
152{
153 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
154 u32 ssr = in_be32(&dev->base->ssr) & ~RGMII_SSR_MASK(input);
155
156 RGMII_DBG("%d: speed(%d, %d)" NL, ocpdev->def->index, input, speed);
157
158 if (speed == SPEED_1000)
159 ssr |= RGMII_SSR_1000(input);
160 else if (speed == SPEED_100)
161 ssr |= RGMII_SSR_100(input);
162
163 out_be32(&dev->base->ssr, ssr);
164}
165
166void __exit __rgmii_fini(struct ocp_device *ocpdev, int input)
167{
168 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
169 BUG_ON(!dev || dev->users == 0);
170
171 RGMII_DBG("%d: fini(%d)" NL, ocpdev->def->index, input);
172
173 /* Disable this input */
174 out_be32(&dev->base->fer,
175 in_be32(&dev->base->fer) & ~RGMII_FER_MASK(input));
176
177 if (!--dev->users) {
178 /* Free everything if this is the last user */
179 ocp_set_drvdata(ocpdev, NULL);
180 iounmap((void *)dev->base);
181 kfree(dev);
182 }
183}
184
185int __rgmii_get_regs_len(struct ocp_device *ocpdev)
186{
187 return sizeof(struct emac_ethtool_regs_subhdr) +
188 sizeof(struct rgmii_regs);
189}
190
191void *rgmii_dump_regs(struct ocp_device *ocpdev, void *buf)
192{
193 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
194 struct emac_ethtool_regs_subhdr *hdr = buf;
195 struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1);
196
197 hdr->version = 0;
198 hdr->index = ocpdev->def->index;
199 memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs));
200 return regs + 1;
201}
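
As a sanity check on the FER layout above (one mode nibble per bridge
input): for input 2, RGMII_FER_MASK(2) is 0x7 << 8 = 0x700 and
RGMII_FER_RGMII(2) is 0x5 << 8 = 0x500; rgmii_init() ORs the mode nibble
in, and __rgmii_fini() masks it back out to disable the input.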
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h
index 49f188f4ea6e..a1ffb8a44fff 100644
--- a/drivers/net/ibm_emac/ibm_emac_rgmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Defines for the IBM RGMII bridge 2 * drivers/net/ibm_emac/ibm_emac_rgmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
3 * 5 *
4 * Based on ocp_zmii.h/ibm_emac_zmii.h 6 * Based on ocp_zmii.h/ibm_emac_zmii.h
5 * Armin Kuster akuster@mvista.com 7 * Armin Kuster akuster@mvista.com
@@ -7,6 +9,9 @@
7 * Copyright 2004 MontaVista Software, Inc. 9 * Copyright 2004 MontaVista Software, Inc.
8 * Matt Porter <mporter@kernel.crashing.org> 10 * Matt Porter <mporter@kernel.crashing.org>
9 * 11 *
12 * Copyright (c) 2004, 2005 Zultys Technologies.
13 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 *
10 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
@@ -19,47 +24,42 @@
19#include <linux/config.h> 24#include <linux/config.h>
20 25
21/* RGMII bridge */ 26/* RGMII bridge */
22typedef struct rgmii_regs { 27struct rgmii_regs {
23 u32 fer; /* Function enable register */ 28 u32 fer; /* Function enable register */
24 u32 ssr; /* Speed select register */ 29 u32 ssr; /* Speed select register */
25} rgmii_t; 30};
26
27#define RGMII_INPUTS 4
28 31
29/* RGMII device */ 32/* RGMII device */
30struct ibm_ocp_rgmii { 33struct ibm_ocp_rgmii {
31 struct rgmii_regs *base; 34 struct rgmii_regs *base;
32 int mode[RGMII_INPUTS];
33 int users; /* number of EMACs using this RGMII bridge */ 35 int users; /* number of EMACs using this RGMII bridge */
34}; 36};
35 37
36 /* Functional Enable Reg */ 38#ifdef CONFIG_IBM_EMAC_RGMII
37#define RGMII_FER_MASK(x) (0x00000007 << (4*x)) 39int rgmii_attach(void *emac) __init;
38#define RGMII_RTBI 0x00000004
39#define RGMII_RGMII 0x00000005
40#define RGMII_TBI 0x00000006
41#define RGMII_GMII 0x00000007
42
43/* Speed Selection reg */
44 40
45#define RGMII_SP2_100 0x00000002 41void __rgmii_fini(struct ocp_device *ocpdev, int input) __exit;
46#define RGMII_SP2_1000 0x00000004 42static inline void rgmii_fini(struct ocp_device *ocpdev, int input)
47#define RGMII_SP3_100 0x00000200 43{
48#define RGMII_SP3_1000 0x00000400 44 if (ocpdev)
45 __rgmii_fini(ocpdev, input);
46}
49 47
50#define RGMII_MII2_SPDMASK 0x00000007 48void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed);
51#define RGMII_MII3_SPDMASK 0x00000700
52 49
53#define RGMII_MII2_100MB RGMII_SP2_100 & ~RGMII_SP2_1000 50int __rgmii_get_regs_len(struct ocp_device *ocpdev);
54#define RGMII_MII2_1000MB RGMII_SP2_1000 & ~RGMII_SP2_100 51static inline int rgmii_get_regs_len(struct ocp_device *ocpdev)
55#define RGMII_MII2_10MB ~(RGMII_SP2_100 | RGMII_SP2_1000) 52{
56#define RGMII_MII3_100MB RGMII_SP3_100 & ~RGMII_SP3_1000 53 return ocpdev ? __rgmii_get_regs_len(ocpdev) : 0;
57#define RGMII_MII3_1000MB RGMII_SP3_1000 & ~RGMII_SP3_100 54}
58#define RGMII_MII3_10MB ~(RGMII_SP3_100 | RGMII_SP3_1000)
59 55
60#define RTBI 0 56void *rgmii_dump_regs(struct ocp_device *ocpdev, void *buf);
61#define RGMII 1 57#else
62#define TBI 2 58# define rgmii_attach(x) 0
63#define GMII 3 59# define rgmii_fini(x,y) ((void)0)
60# define rgmii_set_speed(x,y,z) ((void)0)
61# define rgmii_get_regs_len(x) 0
62# define rgmii_dump_regs(x,buf) (buf)
63#endif /* !CONFIG_IBM_EMAC_RGMII */
64 64
65#endif /* _IBM_EMAC_RGMII_H_ */ 65#endif /* _IBM_EMAC_RGMII_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.c b/drivers/net/ibm_emac/ibm_emac_tah.c
new file mode 100644
index 000000000000..af08afc22f9f
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_tah.c
@@ -0,0 +1,111 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_tah.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
5 *
6 * Copyright 2004 MontaVista Software, Inc.
7 * Matt Porter <mporter@kernel.crashing.org>
8 *
9 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#include <linux/config.h>
17#include <asm/io.h>
18
19#include "ibm_emac_core.h"
20
21static int __init tah_init(struct ocp_device *ocpdev)
22{
23 struct tah_regs *p;
24
25 if (ocp_get_drvdata(ocpdev)) {
26 printk(KERN_ERR "tah%d: already in use!\n", ocpdev->def->index);
27 return -EBUSY;
28 }
29
30 /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
31 p = (struct tah_regs *)ioremap(ocpdev->def->paddr, sizeof(*p));
32 if (!p) {
33 printk(KERN_ERR "tah%d: could not ioremap device registers!\n",
34 ocpdev->def->index);
35 return -ENOMEM;
36 }
37 ocp_set_drvdata(ocpdev, p);
38 __tah_reset(ocpdev);
39
40 return 0;
41}
42
43int __init tah_attach(void *emac)
44{
45 struct ocp_enet_private *dev = emac;
46 struct ocp_func_emac_data *emacdata = dev->def->additions;
47
48 /* Check if we need to attach to a TAH */
49 if (emacdata->tah_idx >= 0) {
50 dev->tah_dev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_TAH,
51 emacdata->tah_idx);
52 if (!dev->tah_dev) {
53 printk(KERN_ERR "emac%d: unknown tah%d!\n",
54 dev->def->index, emacdata->tah_idx);
55 return -ENODEV;
56 }
57 if (tah_init(dev->tah_dev)) {
58 printk(KERN_ERR
59 "emac%d: tah%d initialization failed!\n",
60 dev->def->index, emacdata->tah_idx);
61 return -ENODEV;
62 }
63 }
64 return 0;
65}
66
67void __exit __tah_fini(struct ocp_device *ocpdev)
68{
69 struct tah_regs *p = ocp_get_drvdata(ocpdev);
70 BUG_ON(!p);
71 ocp_set_drvdata(ocpdev, NULL);
72 iounmap((void *)p);
73}
74
75void __tah_reset(struct ocp_device *ocpdev)
76{
77 struct tah_regs *p = ocp_get_drvdata(ocpdev);
78 int n;
79
80 /* Reset TAH */
81 out_be32(&p->mr, TAH_MR_SR);
82 n = 100;
83 while ((in_be32(&p->mr) & TAH_MR_SR) && n)
84 --n;
85
86 if (unlikely(!n))
87 printk(KERN_ERR "tah%d: reset timeout\n", ocpdev->def->index);
88
89 /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
90 out_be32(&p->mr,
91 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
92 TAH_MR_DIG);
93}
94
95int __tah_get_regs_len(struct ocp_device *ocpdev)
96{
97 return sizeof(struct emac_ethtool_regs_subhdr) +
98 sizeof(struct tah_regs);
99}
100
101void *tah_dump_regs(struct ocp_device *ocpdev, void *buf)
102{
103 struct tah_regs *dev = ocp_get_drvdata(ocpdev);
104 struct emac_ethtool_regs_subhdr *hdr = buf;
105 struct tah_regs *regs = (struct tah_regs *)(hdr + 1);
106
107 hdr->version = 0;
108 hdr->index = ocpdev->def->index;
109 memcpy_fromio(regs, dev, sizeof(struct tah_regs));
110 return regs + 1;
111}
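
For reference, the mode word programmed by __tah_reset() decomposes as
(pure arithmetic from the defines in ibm_emac_tah.h):

	TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP | TAH_MR_DIG
	  = 0x80000000 | 0x03000000 | 0x00a00000 | 0x00100000 | 0x00080000
	  = 0x83b80000

which matches the "enable IPv4 checksum verification, no TSO yet" comment
in tah_init().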
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h
index ecfc69805521..9299b5dd7eb1 100644
--- a/drivers/net/ibm_emac/ibm_emac_tah.h
+++ b/drivers/net/ibm_emac/ibm_emac_tah.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * Defines for the IBM TAH 2 * drivers/net/ibm_emac/ibm_emac_tah.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
3 * 5 *
4 * Copyright 2004 MontaVista Software, Inc. 6 * Copyright 2004 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 7 * Matt Porter <mporter@kernel.crashing.org>
6 * 8 *
9 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
10 *
7 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 12 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your 13 * Free Software Foundation; either version 2 of the License, or (at your
@@ -13,36 +17,72 @@
13#ifndef _IBM_EMAC_TAH_H 17#ifndef _IBM_EMAC_TAH_H
14#define _IBM_EMAC_TAH_H 18#define _IBM_EMAC_TAH_H
15 19
20#include <linux/config.h>
21#include <linux/init.h>
22#include <asm/ocp.h>
23
16/* TAH */ 24/* TAH */
17typedef struct tah_regs { 25struct tah_regs {
18 u32 tah_revid; 26 u32 revid;
19 u32 pad[3]; 27 u32 pad[3];
20 u32 tah_mr; 28 u32 mr;
21 u32 tah_ssr0; 29 u32 ssr0;
22 u32 tah_ssr1; 30 u32 ssr1;
23 u32 tah_ssr2; 31 u32 ssr2;
24 u32 tah_ssr3; 32 u32 ssr3;
25 u32 tah_ssr4; 33 u32 ssr4;
26 u32 tah_ssr5; 34 u32 ssr5;
27 u32 tah_tsr; 35 u32 tsr;
28} tah_t; 36};
29 37
30/* TAH engine */ 38/* TAH engine */
31#define TAH_MR_CVR 0x80000000 39#define TAH_MR_CVR 0x80000000
32#define TAH_MR_SR 0x40000000 40#define TAH_MR_SR 0x40000000
33#define TAH_MR_ST_256 0x01000000 41#define TAH_MR_ST_256 0x01000000
34#define TAH_MR_ST_512 0x02000000 42#define TAH_MR_ST_512 0x02000000
35#define TAH_MR_ST_768 0x03000000 43#define TAH_MR_ST_768 0x03000000
36#define TAH_MR_ST_1024 0x04000000 44#define TAH_MR_ST_1024 0x04000000
37#define TAH_MR_ST_1280 0x05000000 45#define TAH_MR_ST_1280 0x05000000
38#define TAH_MR_ST_1536 0x06000000 46#define TAH_MR_ST_1536 0x06000000
39#define TAH_MR_TFS_16KB 0x00000000 47#define TAH_MR_TFS_16KB 0x00000000
40#define TAH_MR_TFS_2KB 0x00200000 48#define TAH_MR_TFS_2KB 0x00200000
41#define TAH_MR_TFS_4KB 0x00400000 49#define TAH_MR_TFS_4KB 0x00400000
42#define TAH_MR_TFS_6KB 0x00600000 50#define TAH_MR_TFS_6KB 0x00600000
43#define TAH_MR_TFS_8KB 0x00800000 51#define TAH_MR_TFS_8KB 0x00800000
44#define TAH_MR_TFS_10KB 0x00a00000 52#define TAH_MR_TFS_10KB 0x00a00000
45#define TAH_MR_DTFP 0x00100000 53#define TAH_MR_DTFP 0x00100000
46#define TAH_MR_DIG 0x00080000 54#define TAH_MR_DIG 0x00080000
55
56#ifdef CONFIG_IBM_EMAC_TAH
57int tah_attach(void *emac) __init;
58
59void __tah_fini(struct ocp_device *ocpdev) __exit;
60static inline void tah_fini(struct ocp_device *ocpdev)
61{
62 if (ocpdev)
63 __tah_fini(ocpdev);
64}
65
66void __tah_reset(struct ocp_device *ocpdev);
67static inline void tah_reset(struct ocp_device *ocpdev)
68{
69 if (ocpdev)
70 __tah_reset(ocpdev);
71}
72
73int __tah_get_regs_len(struct ocp_device *ocpdev);
74static inline int tah_get_regs_len(struct ocp_device *ocpdev)
75{
76 return ocpdev ? __tah_get_regs_len(ocpdev) : 0;
77}
78
79void *tah_dump_regs(struct ocp_device *ocpdev, void *buf);
80#else
81# define tah_attach(x) 0
82# define tah_fini(x) ((void)0)
83# define tah_reset(x) ((void)0)
84# define tah_get_regs_len(x) 0
85# define tah_dump_regs(x,buf) (buf)
86#endif /* !CONFIG_IBM_EMAC_TAH */
47 87
48#endif /* _IBM_EMAC_TAH_H */ 88#endif /* _IBM_EMAC_TAH_H */
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.c b/drivers/net/ibm_emac/ibm_emac_zmii.c
new file mode 100644
index 000000000000..35c1185079ed
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.c
@@ -0,0 +1,255 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_zmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
11 * Copyright 2001 MontaVista Software Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/ethtool.h>
22#include <asm/io.h>
23
24#include "ibm_emac_core.h"
25#include "ibm_emac_debug.h"
26
27/* ZMIIx_FER */
28#define ZMII_FER_MDI(idx) (0x80000000 >> ((idx) * 4))
29#define ZMII_FER_MDI_ALL (ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \
30 ZMII_FER_MDI(2) | ZMII_FER_MDI(3))
31
32#define ZMII_FER_SMII(idx) (0x40000000 >> ((idx) * 4))
33#define ZMII_FER_RMII(idx) (0x20000000 >> ((idx) * 4))
34#define ZMII_FER_MII(idx) (0x10000000 >> ((idx) * 4))
35
36/* ZMIIx_SSR */
37#define ZMII_SSR_SCI(idx) (0x40000000 >> ((idx) * 4))
38#define ZMII_SSR_FSS(idx) (0x20000000 >> ((idx) * 4))
39#define ZMII_SSR_SP(idx) (0x10000000 >> ((idx) * 4))
40
41/* ZMII only supports MII, RMII and SMII
42 * we also support autodetection for backward compatibility
43 */
44static inline int zmii_valid_mode(int mode)
45{
46 return mode == PHY_MODE_MII ||
47 mode == PHY_MODE_RMII ||
48 mode == PHY_MODE_SMII ||
49 mode == PHY_MODE_NA;
50}
51
52static inline const char *zmii_mode_name(int mode)
53{
54 switch (mode) {
55 case PHY_MODE_MII:
56 return "MII";
57 case PHY_MODE_RMII:
58 return "RMII";
59 case PHY_MODE_SMII:
60 return "SMII";
61 default:
62 BUG();
63 }
64}
65
66static inline u32 zmii_mode_mask(int mode, int input)
67{
68 switch (mode) {
69 case PHY_MODE_MII:
70 return ZMII_FER_MII(input);
71 case PHY_MODE_RMII:
72 return ZMII_FER_RMII(input);
73 case PHY_MODE_SMII:
74 return ZMII_FER_SMII(input);
75 default:
76 return 0;
77 }
78}
79
80static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode)
81{
82 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
83 struct zmii_regs *p;
84
85 ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode);
86
87 if (!dev) {
88 dev = kzalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
89 if (!dev) {
90 printk(KERN_ERR
91 "zmii%d: couldn't allocate device structure!\n",
92 ocpdev->def->index);
93 return -ENOMEM;
94 }
95 dev->mode = PHY_MODE_NA;
96
97 p = (struct zmii_regs *)ioremap(ocpdev->def->paddr,
98 sizeof(struct zmii_regs));
99 if (!p) {
100 printk(KERN_ERR
101 "zmii%d: could not ioremap device registers!\n",
102 ocpdev->def->index);
103 kfree(dev);
104 return -ENOMEM;
105 }
106 dev->base = p;
107 ocp_set_drvdata(ocpdev, dev);
108
109 /* We may need FER value for autodetection later */
110 dev->fer_save = in_be32(&p->fer);
111
112 /* Disable all inputs by default */
113 out_be32(&p->fer, 0);
114 } else
115 p = dev->base;
116
117 if (!zmii_valid_mode(*mode)) {
118 /* Probably an EMAC connected to RGMII,
119 * but it still may need ZMII for MDIO
120 */
121 goto out;
122 }
123
124 /* Autodetect ZMII mode if not specified.
125 * This is only for backward compatibility with the old driver.
126 * Please, always specify PHY mode in your board port to avoid
127 * any surprises.
128 */
129 if (dev->mode == PHY_MODE_NA) {
130 if (*mode == PHY_MODE_NA) {
131 u32 r = dev->fer_save;
132
133 ZMII_DBG("%d: autodetecting mode, FER = 0x%08x" NL,
134 ocpdev->def->index, r);
135
136 if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
137 dev->mode = PHY_MODE_MII;
138 else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
139 dev->mode = PHY_MODE_RMII;
140 else
141 dev->mode = PHY_MODE_SMII;
142 } else
143 dev->mode = *mode;
144
145 printk(KERN_NOTICE "zmii%d: bridge in %s mode\n",
146 ocpdev->def->index, zmii_mode_name(dev->mode));
147 } else {
148 /* All inputs must use the same mode */
149 if (*mode != PHY_MODE_NA && *mode != dev->mode) {
150 printk(KERN_ERR
151 "zmii%d: invalid mode %d specified for input %d\n",
152 ocpdev->def->index, *mode, input);
153 return -EINVAL;
154 }
155 }
156
157 /* Report back correct PHY mode,
158 * it may be used during PHY initialization.
159 */
160 *mode = dev->mode;
161
162 /* Enable this input */
163 out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input));
164 out:
165 ++dev->users;
166 return 0;
167}
168
169int __init zmii_attach(void *emac)
170{
171 struct ocp_enet_private *dev = emac;
172 struct ocp_func_emac_data *emacdata = dev->def->additions;
173
174 if (emacdata->zmii_idx >= 0) {
175 dev->zmii_input = emacdata->zmii_mux;
176 dev->zmii_dev =
177 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_ZMII,
178 emacdata->zmii_idx);
179 if (!dev->zmii_dev) {
180 printk(KERN_ERR "emac%d: unknown zmii%d!\n",
181 dev->def->index, emacdata->zmii_idx);
182 return -ENODEV;
183 }
184 if (zmii_init
185 (dev->zmii_dev, dev->zmii_input, &emacdata->phy_mode)) {
186 printk(KERN_ERR
187 "emac%d: zmii%d initialization failed!\n",
188 dev->def->index, emacdata->zmii_idx);
189 return -ENODEV;
190 }
191 }
192 return 0;
193}
194
195void __zmii_enable_mdio(struct ocp_device *ocpdev, int input)
196{
197 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
198 u32 fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL;
199
200 ZMII_DBG2("%d: mdio(%d)" NL, ocpdev->def->index, input);
201
202 out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input));
203}
204
205void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
206{
207 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
208 u32 ssr = in_be32(&dev->base->ssr);
209
210 ZMII_DBG("%d: speed(%d, %d)" NL, ocpdev->def->index, input, speed);
211
212 if (speed == SPEED_100)
213 ssr |= ZMII_SSR_SP(input);
214 else
215 ssr &= ~ZMII_SSR_SP(input);
216
217 out_be32(&dev->base->ssr, ssr);
218}
219
220void __exit __zmii_fini(struct ocp_device *ocpdev, int input)
221{
222 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
223 BUG_ON(!dev || dev->users == 0);
224
225 ZMII_DBG("%d: fini(%d)" NL, ocpdev->def->index, input);
226
227 /* Disable this input */
228 out_be32(&dev->base->fer,
229 in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input));
230
231 if (!--dev->users) {
232 /* Free everything if this is the last user */
233 ocp_set_drvdata(ocpdev, NULL);
234 iounmap((void *)dev->base);
235 kfree(dev);
236 }
237}
238
239int __zmii_get_regs_len(struct ocp_device *ocpdev)
240{
241 return sizeof(struct emac_ethtool_regs_subhdr) +
242 sizeof(struct zmii_regs);
243}
244
245void *zmii_dump_regs(struct ocp_device *ocpdev, void *buf)
246{
247 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
248 struct emac_ethtool_regs_subhdr *hdr = buf;
249 struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1);
250
251 hdr->version = 0;
252 hdr->index = ocpdev->def->index;
253 memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs));
254 return regs + 1;
255}
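
A note on the ZMII FER layout that the MDIO switching above relies on
(one nibble per input, allocated from the MSB down): for input 1,
ZMII_FER_MDI(1) is 0x80000000 >> 4 = 0x08000000 and ZMII_FER_MII(1) is
0x10000000 >> 4 = 0x01000000. __zmii_enable_mdio() clears ZMII_FER_MDI_ALL
before setting a single MDI bit, so only one EMAC drives the shared MDIO
lines at a time.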
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h
index 6f6cd2a39e38..0bb26062c0ad 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -1,23 +1,27 @@
1/* 1/*
2 * ocp_zmii.h 2 * drivers/net/ibm_emac/ibm_emac_zmii.h
3 * 3 *
4 * Defines for the IBM ZMII bridge 4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 * 5 *
6 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Dec, 2001 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 * 8 *
9 * Copyright 2001 MontaVista Software Inc. 9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
11 * Copyright 2001 MontaVista Software Inc.
10 * 12 *
11 * This program is free software; you can redistribute it and/or modify it 13 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 14 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your 15 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version. 16 * option) any later version.
17 *
15 */ 18 */
16
17#ifndef _IBM_EMAC_ZMII_H_ 19#ifndef _IBM_EMAC_ZMII_H_
18#define _IBM_EMAC_ZMII_H_ 20#define _IBM_EMAC_ZMII_H_
19 21
20#include <linux/config.h> 22#include <linux/config.h>
23#include <linux/init.h>
24#include <asm/ocp.h>
21 25
22/* ZMII bridge registers */ 26/* ZMII bridge registers */
23struct zmii_regs { 27struct zmii_regs {
@@ -26,68 +30,54 @@ struct zmii_regs {
26 u32 smiirs; /* SMII status reg */ 30 u32 smiirs; /* SMII status reg */
27}; 31};
28 32
29#define ZMII_INPUTS 4
30
31/* ZMII device */ 33/* ZMII device */
32struct ibm_ocp_zmii { 34struct ibm_ocp_zmii {
33 struct zmii_regs *base; 35 struct zmii_regs *base;
34 int mode[ZMII_INPUTS]; 36 int mode; /* subset of PHY_MODE_XXXX */
35 int users; /* number of EMACs using this ZMII bridge */ 37 int users; /* number of EMACs using this ZMII bridge */
38 u32 fer_save; /* FER value left by firmware */
36}; 39};
37 40
38 /* Functional Enable Reg */ 41#ifdef CONFIG_IBM_EMAC_ZMII
39 42int zmii_attach(void *emac) __init;
40#define ZMII_FER_MASK(x) (0xf0000000 >> (4*x))
41
42#define ZMII_MDI0 0x80000000
43#define ZMII_SMII0 0x40000000
44#define ZMII_RMII0 0x20000000
45#define ZMII_MII0 0x10000000
46#define ZMII_MDI1 0x08000000
47#define ZMII_SMII1 0x04000000
48#define ZMII_RMII1 0x02000000
49#define ZMII_MII1 0x01000000
50#define ZMII_MDI2 0x00800000
51#define ZMII_SMII2 0x00400000
52#define ZMII_RMII2 0x00200000
53#define ZMII_MII2 0x00100000
54#define ZMII_MDI3 0x00080000
55#define ZMII_SMII3 0x00040000
56#define ZMII_RMII3 0x00020000
57#define ZMII_MII3 0x00010000
58 43
59/* Speed Selection reg */ 44void __zmii_fini(struct ocp_device *ocpdev, int input) __exit;
45static inline void zmii_fini(struct ocp_device *ocpdev, int input)
46{
47 if (ocpdev)
48 __zmii_fini(ocpdev, input);
49}
60 50
61#define ZMII_SCI0 0x40000000 51void __zmii_enable_mdio(struct ocp_device *ocpdev, int input);
62#define ZMII_FSS0 0x20000000 52static inline void zmii_enable_mdio(struct ocp_device *ocpdev, int input)
63#define ZMII_SP0 0x10000000 53{
64#define ZMII_SCI1 0x04000000 54 if (ocpdev)
65#define ZMII_FSS1 0x02000000 55 __zmii_enable_mdio(ocpdev, input);
66#define ZMII_SP1 0x01000000 56}
67#define ZMII_SCI2 0x00400000
68#define ZMII_FSS2 0x00200000
69#define ZMII_SP2 0x00100000
70#define ZMII_SCI3 0x00040000
71#define ZMII_FSS3 0x00020000
72#define ZMII_SP3 0x00010000
73 57
74#define ZMII_MII0_100MB ZMII_SP0 58void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed);
75#define ZMII_MII0_10MB ~ZMII_SP0 59static inline void zmii_set_speed(struct ocp_device *ocpdev, int input,
76#define ZMII_MII1_100MB ZMII_SP1 60 int speed)
77#define ZMII_MII1_10MB ~ZMII_SP1 61{
78#define ZMII_MII2_100MB ZMII_SP2 62 if (ocpdev)
79#define ZMII_MII2_10MB ~ZMII_SP2 63 __zmii_set_speed(ocpdev, input, speed);
80#define ZMII_MII3_100MB ZMII_SP3 64}
81#define ZMII_MII3_10MB ~ZMII_SP3
82 65
83/* SMII Status reg */ 66int __zmii_get_regs_len(struct ocp_device *ocpdev);
67static inline int zmii_get_regs_len(struct ocp_device *ocpdev)
68{
69 return ocpdev ? __zmii_get_regs_len(ocpdev) : 0;
70}
84 71
85#define ZMII_STS0 0xFF000000 /* EMAC0 smii status mask */ 72void *zmii_dump_regs(struct ocp_device *ocpdev, void *buf);
86#define ZMII_STS1 0x00FF0000 /* EMAC1 smii status mask */
87 73
88#define SMII 0 74#else
89#define RMII 1 75# define zmii_attach(x) 0
90#define MII 2 76# define zmii_fini(x,y) ((void)0)
91#define MDI 3 77# define zmii_enable_mdio(x,y) ((void)0)
78# define zmii_set_speed(x,y,z) ((void)0)
79# define zmii_get_regs_len(x) 0
80# define zmii_dump_regs(x,buf) (buf)
81#endif /* !CONFIG_IBM_EMAC_ZMII */
92 82
93#endif /* _IBM_EMAC_ZMII_H_ */ 83#endif /* _IBM_EMAC_ZMII_H_ */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a2c4dd4fb221..36da54ad2b7b 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -96,7 +96,7 @@ static void ibmveth_proc_unregister_driver(void);
96static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter); 96static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
97static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); 97static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
98static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 98static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
99static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*); 99static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
100 100
101#ifdef CONFIG_PROC_FS 101#ifdef CONFIG_PROC_FS
102#define IBMVETH_PROC_DIR "net/ibmveth" 102#define IBMVETH_PROC_DIR "net/ibmveth"
@@ -181,6 +181,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
181 atomic_set(&pool->available, 0); 181 atomic_set(&pool->available, 0);
182 pool->producer_index = 0; 182 pool->producer_index = 0;
183 pool->consumer_index = 0; 183 pool->consumer_index = 0;
184 pool->active = 0;
184 185
185 return 0; 186 return 0;
186} 187}
@@ -236,7 +237,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
236 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 237 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
237 238
238 if(lpar_rc != H_Success) { 239 if(lpar_rc != H_Success) {
239 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; 240 pool->free_map[free_index] = index;
240 pool->skbuff[index] = NULL; 241 pool->skbuff[index] = NULL;
241 pool->consumer_index--; 242 pool->consumer_index--;
242 dma_unmap_single(&adapter->vdev->dev, 243 dma_unmap_single(&adapter->vdev->dev,
@@ -255,37 +256,19 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
255 atomic_add(buffers_added, &(pool->available)); 256 atomic_add(buffers_added, &(pool->available));
256} 257}
257 258
258/* check if replenishing is needed. */ 259/* replenish routine */
259static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
260{
261 return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
262 (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
263 (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
264}
265
266/* kick the replenish tasklet if we need replenishing and it isn't already running */
267static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
268{
269 if(ibmveth_is_replenishing_needed(adapter) &&
270 (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
271 schedule_work(&adapter->replenish_task);
272 }
273}
274
275/* replenish tasklet routine */
276static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) 260static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
277{ 261{
262 int i;
263
278 adapter->replenish_task_cycles++; 264 adapter->replenish_task_cycles++;
279 265
280 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]); 266 for(i = 0; i < IbmVethNumBufferPools; i++)
281 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]); 267 if(adapter->rx_buff_pool[i].active)
282 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]); 268 ibmveth_replenish_buffer_pool(adapter,
269 &adapter->rx_buff_pool[i]);
283 270
284 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); 271 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
285
286 atomic_inc(&adapter->not_replenishing);
287
288 ibmveth_schedule_replenishing(adapter);
289} 272}
290 273
291/* empty and free a buffer pool - also used to do cleanup in error paths */ 274/* empty and free a buffer pool - also used to do cleanup in error paths */
@@ -293,10 +276,8 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
293{ 276{
294 int i; 277 int i;
295 278
296 if(pool->free_map) { 279 kfree(pool->free_map);
297 kfree(pool->free_map); 280 pool->free_map = NULL;
298 pool->free_map = NULL;
299 }
300 281
301 if(pool->skbuff && pool->dma_addr) { 282 if(pool->skbuff && pool->dma_addr) {
302 for(i = 0; i < pool->size; ++i) { 283 for(i = 0; i < pool->size; ++i) {
@@ -321,6 +302,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
321 kfree(pool->skbuff); 302 kfree(pool->skbuff);
322 pool->skbuff = NULL; 303 pool->skbuff = NULL;
323 } 304 }
305 pool->active = 0;
324} 306}
325 307
326/* remove a buffer from a pool */ 308/* remove a buffer from a pool */
@@ -379,6 +361,12 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
379 ibmveth_assert(pool < IbmVethNumBufferPools); 361 ibmveth_assert(pool < IbmVethNumBufferPools);
380 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 362 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
381 363
364 if(!adapter->rx_buff_pool[pool].active) {
365 ibmveth_rxq_harvest_buffer(adapter);
366 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
367 return;
368 }
369
382 desc.desc = 0; 370 desc.desc = 0;
383 desc.fields.valid = 1; 371 desc.fields.valid = 1;
384 desc.fields.length = adapter->rx_buff_pool[pool].buff_size; 372 desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
@@ -409,6 +397,8 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
409 397
410static void ibmveth_cleanup(struct ibmveth_adapter *adapter) 398static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
411{ 399{
400 int i;
401
412 if(adapter->buffer_list_addr != NULL) { 402 if(adapter->buffer_list_addr != NULL) {
413 if(!dma_mapping_error(adapter->buffer_list_dma)) { 403 if(!dma_mapping_error(adapter->buffer_list_dma)) {
414 dma_unmap_single(&adapter->vdev->dev, 404 dma_unmap_single(&adapter->vdev->dev,
@@ -443,26 +433,24 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
443 adapter->rx_queue.queue_addr = NULL; 433 adapter->rx_queue.queue_addr = NULL;
444 } 434 }
445 435
446 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]); 436 for(i = 0; i<IbmVethNumBufferPools; i++)
447 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]); 437 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
448 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
449} 438}
450 439
451static int ibmveth_open(struct net_device *netdev) 440static int ibmveth_open(struct net_device *netdev)
452{ 441{
453 struct ibmveth_adapter *adapter = netdev->priv; 442 struct ibmveth_adapter *adapter = netdev->priv;
454 u64 mac_address = 0; 443 u64 mac_address = 0;
455 int rxq_entries; 444 int rxq_entries = 1;
456 unsigned long lpar_rc; 445 unsigned long lpar_rc;
457 int rc; 446 int rc;
458 union ibmveth_buf_desc rxq_desc; 447 union ibmveth_buf_desc rxq_desc;
448 int i;
459 449
460 ibmveth_debug_printk("open starting\n"); 450 ibmveth_debug_printk("open starting\n");
461 451
462 rxq_entries = 452 for(i = 0; i<IbmVethNumBufferPools; i++)
463 adapter->rx_buff_pool[0].size + 453 rxq_entries += adapter->rx_buff_pool[i].size;
464 adapter->rx_buff_pool[1].size +
465 adapter->rx_buff_pool[2].size + 1;
466 454
467 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 455 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
468 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 456 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
@@ -502,14 +490,8 @@ static int ibmveth_open(struct net_device *netdev)
502 adapter->rx_queue.num_slots = rxq_entries; 490 adapter->rx_queue.num_slots = rxq_entries;
503 adapter->rx_queue.toggle = 1; 491 adapter->rx_queue.toggle = 1;
504 492
505 if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) || 493 /* call change_mtu to init the buffer pools based in initial mtu */
506 ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) || 494 ibmveth_change_mtu(netdev, netdev->mtu);
507 ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
508 {
509 ibmveth_error_printk("unable to allocate buffer pools\n");
510 ibmveth_cleanup(adapter);
511 return -ENOMEM;
512 }
513 495
514 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); 496 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
515 mac_address = mac_address >> 16; 497 mac_address = mac_address >> 16;
@@ -552,10 +534,10 @@ static int ibmveth_open(struct net_device *netdev)
552 return rc; 534 return rc;
553 } 535 }
554 536
555 netif_start_queue(netdev); 537 ibmveth_debug_printk("initial replenish cycle\n");
538 ibmveth_replenish_task(adapter);
556 539
557 ibmveth_debug_printk("scheduling initial replenish cycle\n"); 540 netif_start_queue(netdev);
558 ibmveth_schedule_replenishing(adapter);
559 541
560 ibmveth_debug_printk("open complete\n"); 542 ibmveth_debug_printk("open complete\n");
561 543
@@ -573,9 +555,6 @@ static int ibmveth_close(struct net_device *netdev)
573 555
574 free_irq(netdev->irq, netdev); 556 free_irq(netdev->irq, netdev);
575 557
576 cancel_delayed_work(&adapter->replenish_task);
577 flush_scheduled_work();
578
579 do { 558 do {
580 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 559 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
581 } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy)); 560 } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@ -640,12 +619,18 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
640 unsigned long lpar_rc; 619 unsigned long lpar_rc;
641 int nfrags = 0, curfrag; 620 int nfrags = 0, curfrag;
642 unsigned long correlator; 621 unsigned long correlator;
622 unsigned long flags;
643 unsigned int retry_count; 623 unsigned int retry_count;
624 unsigned int tx_dropped = 0;
625 unsigned int tx_bytes = 0;
626 unsigned int tx_packets = 0;
627 unsigned int tx_send_failed = 0;
628 unsigned int tx_map_failed = 0;
629
644 630
645 if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) { 631 if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
646 adapter->stats.tx_dropped++; 632 tx_dropped++;
647 dev_kfree_skb(skb); 633 goto out;
648 return 0;
649 } 634 }
650 635
651 memset(&desc, 0, sizeof(desc)); 636 memset(&desc, 0, sizeof(desc));
@@ -664,10 +649,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
664 649
665 if(dma_mapping_error(desc[0].fields.address)) { 650 if(dma_mapping_error(desc[0].fields.address)) {
666 ibmveth_error_printk("tx: unable to map initial fragment\n"); 651 ibmveth_error_printk("tx: unable to map initial fragment\n");
667 adapter->tx_map_failed++; 652 tx_map_failed++;
668 adapter->stats.tx_dropped++; 653 tx_dropped++;
669 dev_kfree_skb(skb); 654 goto out;
670 return 0;
671 } 655 }
672 656
673 curfrag = nfrags; 657 curfrag = nfrags;
@@ -684,8 +668,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
684 668
685 if(dma_mapping_error(desc[curfrag+1].fields.address)) { 669 if(dma_mapping_error(desc[curfrag+1].fields.address)) {
686 ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag); 670 ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
687 adapter->tx_map_failed++; 671 tx_map_failed++;
688 adapter->stats.tx_dropped++; 672 tx_dropped++;
689 /* Free all the mappings we just created */ 673 /* Free all the mappings we just created */
690 while(curfrag < nfrags) { 674 while(curfrag < nfrags) {
691 dma_unmap_single(&adapter->vdev->dev, 675 dma_unmap_single(&adapter->vdev->dev,
@@ -694,8 +678,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
694 DMA_TO_DEVICE); 678 DMA_TO_DEVICE);
695 curfrag++; 679 curfrag++;
696 } 680 }
697 dev_kfree_skb(skb); 681 goto out;
698 return 0;
699 } 682 }
700 } 683 }
701 684
@@ -720,11 +703,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
720 ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i, 703 ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
721 desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address); 704 desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
722 } 705 }
723 adapter->tx_send_failed++; 706 tx_send_failed++;
724 adapter->stats.tx_dropped++; 707 tx_dropped++;
725 } else { 708 } else {
726 adapter->stats.tx_packets++; 709 tx_packets++;
727 adapter->stats.tx_bytes += skb->len; 710 tx_bytes += skb->len;
711 netdev->trans_start = jiffies;
728 } 712 }
729 713
730 do { 714 do {
@@ -733,6 +717,14 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
733 desc[nfrags].fields.length, DMA_TO_DEVICE); 717 desc[nfrags].fields.length, DMA_TO_DEVICE);
734 } while(--nfrags >= 0); 718 } while(--nfrags >= 0);
735 719
720out: spin_lock_irqsave(&adapter->stats_lock, flags);
721 adapter->stats.tx_dropped += tx_dropped;
722 adapter->stats.tx_bytes += tx_bytes;
723 adapter->stats.tx_packets += tx_packets;
724 adapter->tx_send_failed += tx_send_failed;
725 adapter->tx_map_failed += tx_map_failed;
726 spin_unlock_irqrestore(&adapter->stats_lock, flags);
727
736 dev_kfree_skb(skb); 728 dev_kfree_skb(skb);
737 return 0; 729 return 0;
738} 730}
@@ -776,13 +768,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
776 adapter->stats.rx_packets++; 768 adapter->stats.rx_packets++;
777 adapter->stats.rx_bytes += length; 769 adapter->stats.rx_bytes += length;
778 frames_processed++; 770 frames_processed++;
771 netdev->last_rx = jiffies;
779 } 772 }
780 } else { 773 } else {
781 more_work = 0; 774 more_work = 0;
782 } 775 }
783 } while(more_work && (frames_processed < max_frames_to_process)); 776 } while(more_work && (frames_processed < max_frames_to_process));
784 777
785 ibmveth_schedule_replenishing(adapter); 778 ibmveth_replenish_task(adapter);
786 779
787 if(more_work) { 780 if(more_work) {
788 /* more work to do - return that we are not done yet */ 781 /* more work to do - return that we are not done yet */
@@ -883,17 +876,54 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
883 876
884static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) 877static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
885{ 878{
886 if ((new_mtu < 68) || (new_mtu > (1<<20))) 879 struct ibmveth_adapter *adapter = dev->priv;
880 int i;
881 int prev_smaller = 1;
882
883 if ((new_mtu < 68) ||
884 (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
887 return -EINVAL; 885 return -EINVAL;
886
887 for(i = 0; i<IbmVethNumBufferPools; i++) {
888 int activate = 0;
889 if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
890 activate = 1;
891 prev_smaller= 1;
892 } else {
893 if (prev_smaller)
894 activate = 1;
895 prev_smaller= 0;
896 }
897
898 if (activate && !adapter->rx_buff_pool[i].active) {
899 struct ibmveth_buff_pool *pool =
900 &adapter->rx_buff_pool[i];
901 if(ibmveth_alloc_buffer_pool(pool)) {
902 ibmveth_error_printk("unable to alloc pool\n");
903 return -ENOMEM;
904 }
905 adapter->rx_buff_pool[i].active = 1;
906 } else if (!activate && adapter->rx_buff_pool[i].active) {
907 adapter->rx_buff_pool[i].active = 0;
908 h_free_logical_lan_buffer(adapter->vdev->unit_address,
909 (u64)pool_size[i]);
910 }
911
912 }
913
914 /* kick the interrupt handler so that the new buffer pools get
915 replenished or deallocated */
916 ibmveth_interrupt(dev->irq, dev, NULL);
917
888 dev->mtu = new_mtu; 918 dev->mtu = new_mtu;
889 return 0; 919 return 0;
890} 920}
891 921
892static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 922static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
893{ 923{
894 int rc; 924 int rc, i;
895 struct net_device *netdev; 925 struct net_device *netdev;
896 struct ibmveth_adapter *adapter; 926 struct ibmveth_adapter *adapter = NULL;
897 927
898 unsigned char *mac_addr_p; 928 unsigned char *mac_addr_p;
899 unsigned int *mcastFilterSize_p; 929 unsigned int *mcastFilterSize_p;
@@ -960,23 +990,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
960 netdev->ethtool_ops = &netdev_ethtool_ops; 990 netdev->ethtool_ops = &netdev_ethtool_ops;
961 netdev->change_mtu = ibmveth_change_mtu; 991 netdev->change_mtu = ibmveth_change_mtu;
962 SET_NETDEV_DEV(netdev, &dev->dev); 992 SET_NETDEV_DEV(netdev, &dev->dev);
993 netdev->features |= NETIF_F_LLTX;
994 spin_lock_init(&adapter->stats_lock);
963 995
964 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 996 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
965 997
966 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize); 998 for(i = 0; i<IbmVethNumBufferPools; i++)
967 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize); 999 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
968 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize); 1000 pool_count[i], pool_size[i]);
969 1001
970 ibmveth_debug_printk("adapter @ 0x%p\n", adapter); 1002 ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
971 1003
972 INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
973
974 adapter->buffer_list_dma = DMA_ERROR_CODE; 1004 adapter->buffer_list_dma = DMA_ERROR_CODE;
975 adapter->filter_list_dma = DMA_ERROR_CODE; 1005 adapter->filter_list_dma = DMA_ERROR_CODE;
976 adapter->rx_queue.queue_dma = DMA_ERROR_CODE; 1006 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
977 1007
978 atomic_set(&adapter->not_replenishing, 1);
979
980 ibmveth_debug_printk("registering netdev...\n"); 1008 ibmveth_debug_printk("registering netdev...\n");
981 1009
982 rc = register_netdev(netdev); 1010 rc = register_netdev(netdev);
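
The start_xmit rework above is what makes the new NETIF_F_LLTX flag safe: the hot path runs without the xmit lock, counts events in stack-local variables, and folds them into the shared counters under stats_lock exactly once per packet. A minimal sketch of the pattern, assuming a hypothetical driver with the same stats_lock field (try_hard_to_send() is an invented placeholder):

static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct example_priv *priv = dev->priv;
        unsigned int tx_packets = 0, tx_dropped = 0;
        unsigned long flags;

        if (try_hard_to_send(skb))      /* map, post to hardware, ... */
                tx_packets++;
        else
                tx_dropped++;

        /* one short critical section, shared with stats readers */
        spin_lock_irqsave(&priv->stats_lock, flags);
        priv->stats.tx_packets += tx_packets;
        priv->stats.tx_dropped += tx_dropped;
        spin_unlock_irqrestore(&priv->stats_lock, flags);

        dev_kfree_skb(skb);
        return 0;
}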
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 51a470da9686..46919a814fca 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -49,6 +49,7 @@
49#define H_SEND_LOGICAL_LAN 0x120 49#define H_SEND_LOGICAL_LAN 0x120
50#define H_MULTICAST_CTRL 0x130 50#define H_MULTICAST_CTRL 0x130
51#define H_CHANGE_LOGICAL_LAN_MAC 0x14C 51#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
52#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
52 53
53/* hcall macros */ 54/* hcall macros */
54#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ 55#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
@@ -69,13 +70,15 @@
69#define h_change_logical_lan_mac(ua, mac) \ 70#define h_change_logical_lan_mac(ua, mac) \
70 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) 71 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
71 72
72#define IbmVethNumBufferPools 3 73#define h_free_logical_lan_buffer(ua, bufsize) \
73#define IbmVethPool0DftSize (1024 * 2) 74 plpar_hcall_norets(H_FREE_LOGICAL_LAN_BUFFER, ua, bufsize)
74#define IbmVethPool1DftSize (1024 * 4) 75
75#define IbmVethPool2DftSize (1024 * 10) 76#define IbmVethNumBufferPools 5
76#define IbmVethPool0DftCnt 256 77#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
77#define IbmVethPool1DftCnt 256 78
78#define IbmVethPool2DftCnt 256 79/* pool_size should be sorted */
80static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
81static int pool_count[] = { 256, 768, 256, 256, 256 };
79 82
80#define IBM_VETH_INVALID_MAP ((u16)0xffff) 83#define IBM_VETH_INVALID_MAP ((u16)0xffff)
81 84
@@ -90,6 +93,7 @@ struct ibmveth_buff_pool {
90 u16 *free_map; 93 u16 *free_map;
91 dma_addr_t *dma_addr; 94 dma_addr_t *dma_addr;
92 struct sk_buff **skbuff; 95 struct sk_buff **skbuff;
96 int active;
93}; 97};
94 98
95struct ibmveth_rx_q { 99struct ibmveth_rx_q {
@@ -114,10 +118,6 @@ struct ibmveth_adapter {
114 dma_addr_t filter_list_dma; 118 dma_addr_t filter_list_dma;
115 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; 119 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
116 struct ibmveth_rx_q rx_queue; 120 struct ibmveth_rx_q rx_queue;
117 atomic_t not_replenishing;
118
119 /* helper tasks */
120 struct work_struct replenish_task;
121 121
122 /* adapter specific stats */ 122 /* adapter specific stats */
123 u64 replenish_task_cycles; 123 u64 replenish_task_cycles;
@@ -131,6 +131,7 @@ struct ibmveth_adapter {
131 u64 tx_linearize_failed; 131 u64 tx_linearize_failed;
132 u64 tx_map_failed; 132 u64 tx_map_failed;
133 u64 tx_send_failed; 133 u64 tx_send_failed;
134 spinlock_t stats_lock;
134}; 135};
135 136
136struct ibmveth_buf_desc_fields { 137struct ibmveth_buf_desc_fields {
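
The comment requiring pool_size[] to be sorted is load-bearing: ibmveth_change_mtu() walks the table in order and keeps every pool active up to and including the first one whose buffers fit the new MTU plus IBMVETH_BUFF_OH. A sketch of the size check that ordering enables (first_pool_that_fits() is illustrative, not a function in the patch):

static int first_pool_that_fits(int mtu)
{
        int i;

        /* ascending sizes: the first hit is the smallest usable pool */
        for (i = 0; i < IbmVethNumBufferPools; i++)
                if (mtu <= pool_size[i] - IBMVETH_BUFF_OH)
                        return i;
        return -1;      /* larger than even the biggest pool allows */
}

With the table above, an MTU of 1500 lands in the 2 KB pool (2048 - 22 = 2026 usable bytes), while a 9000-byte jumbo MTU keeps everything up through the 16 KB pool active and lets the 32 KB and 64 KB pools be freed.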
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 0a08c539c051..0282771b1cbb 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1695,11 +1695,9 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1695 1695
1696freebufs: 1696freebufs:
1697 for (i = 0; i < TX_SLOTS; ++i) 1697 for (i = 0; i < TX_SLOTS; ++i)
1698 if (self->tx_bufs[i]) 1698 kfree (self->tx_bufs[i]);
1699 kfree (self->tx_bufs[i]);
1700 for (i = 0; i < RX_SLOTS; ++i) 1699 for (i = 0; i < RX_SLOTS; ++i)
1701 if (self->rx_bufs[i]) 1700 kfree (self->rx_bufs[i]);
1702 kfree (self->rx_bufs[i]);
1703 kfree(self->ringbuf); 1701 kfree(self->ringbuf);
1704 1702
1705freeregion: 1703freeregion:
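
This hunk and most of the kfree() cleanups that follow (irda-usb, irport, sir_dev, vlsi_ir, mace, ni65, rrunner, s2io, saa9730, de2104x, tg3) rely on one guarantee: kfree(NULL) is defined to do nothing. So the guarded form

        if (self->tx_bufs[i])
                kfree(self->tx_bufs[i]);

collapses to the single call

        kfree(self->tx_bufs[i]);

Note that the sk_buff frees keep their NULL checks (e.g. in sir_dev.c above), presumably because kfree_skb() offered no such guarantee in this kernel.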
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 6c766fdc51a6..c22c0517883c 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1168,10 +1168,8 @@ static inline void irda_usb_close(struct irda_usb_cb *self)
1168 unregister_netdev(self->netdev); 1168 unregister_netdev(self->netdev);
1169 1169
1170 /* Remove the speed buffer */ 1170 /* Remove the speed buffer */
1171 if (self->speed_buff != NULL) { 1171 kfree(self->speed_buff);
1172 kfree(self->speed_buff); 1172 self->speed_buff = NULL;
1173 self->speed_buff = NULL;
1174 }
1175} 1173}
1176 1174
1177/********************** USB CONFIG SUBROUTINES **********************/ 1175/********************** USB CONFIG SUBROUTINES **********************/
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 5971315f3fa0..3d016a498e1d 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -235,8 +235,7 @@ static int irport_close(struct irport_cb *self)
235 __FUNCTION__, self->io.sir_base); 235 __FUNCTION__, self->io.sir_base);
236 release_region(self->io.sir_base, self->io.sir_ext); 236 release_region(self->io.sir_base, self->io.sir_ext);
237 237
238 if (self->tx_buff.head) 238 kfree(self->tx_buff.head);
239 kfree(self->tx_buff.head);
240 239
241 if (self->rx_buff.skb) 240 if (self->rx_buff.skb)
242 kfree_skb(self->rx_buff.skb); 241 kfree_skb(self->rx_buff.skb);
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index efc5a8870565..df22b8b532e7 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -490,8 +490,7 @@ static void sirdev_free_buffers(struct sir_dev *dev)
490{ 490{
491 if (dev->rx_buff.skb) 491 if (dev->rx_buff.skb)
492 kfree_skb(dev->rx_buff.skb); 492 kfree_skb(dev->rx_buff.skb);
493 if (dev->tx_buff.head) 493 kfree(dev->tx_buff.head);
494 kfree(dev->tx_buff.head);
495 dev->rx_buff.head = dev->tx_buff.head = NULL; 494 dev->rx_buff.head = dev->tx_buff.head = NULL;
496 dev->rx_buff.skb = NULL; 495 dev->rx_buff.skb = NULL;
497} 496}
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 651c5a6578fd..a9f49f058cfb 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -473,8 +473,7 @@ static int vlsi_free_ring(struct vlsi_ring *r)
473 rd_set_addr_status(rd, 0, 0); 473 rd_set_addr_status(rd, 0, 0);
474 if (busaddr) 474 if (busaddr)
475 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 475 pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
476 if (rd->buf) 476 kfree(rd->buf);
477 kfree(rd->buf);
478 } 477 }
479 kfree(r); 478 kfree(r);
480 return 0; 479 return 0;
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 81d0a26e4f41..09b1e7b364e5 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -1035,10 +1035,8 @@ static void __exit mace_cleanup(void)
1035{ 1035{
1036 macio_unregister_driver(&mace_driver); 1036 macio_unregister_driver(&mace_driver);
1037 1037
1038 if (dummy_buf) { 1038 kfree(dummy_buf);
1039 kfree(dummy_buf); 1039 dummy_buf = NULL;
1040 dummy_buf = NULL;
1041 }
1042} 1040}
1043 1041
1044MODULE_AUTHOR("Paul Mackerras"); 1042MODULE_AUTHOR("Paul Mackerras");
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index e531a4eedfee..d11821dd86ed 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -675,7 +675,6 @@ static int ne2k_pci_resume (struct pci_dev *pdev)
675 pci_set_power_state(pdev, 0); 675 pci_set_power_state(pdev, 0);
676 pci_restore_state(pdev); 676 pci_restore_state(pdev);
677 pci_enable_device(pdev); 677 pci_enable_device(pdev);
678 pci_set_master(pdev);
679 NS8390_init(dev, 1); 678 NS8390_init(dev, 1);
680 netif_device_attach(dev); 679 netif_device_attach(dev);
681 680
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 925d1dfcc4dc..bb42ff218484 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -696,8 +696,7 @@ static void ni65_free_buffer(struct priv *p)
696 return; 696 return;
697 697
698 for(i=0;i<TMDNUM;i++) { 698 for(i=0;i<TMDNUM;i++) {
699 if(p->tmdbounce[i]) 699 kfree(p->tmdbounce[i]);
700 kfree(p->tmdbounce[i]);
701#ifdef XMT_VIA_SKB 700#ifdef XMT_VIA_SKB
702 if(p->tmd_skb[i]) 701 if(p->tmd_skb[i])
703 dev_kfree_skb(p->tmd_skb[i]); 702 dev_kfree_skb(p->tmd_skb[i]);
@@ -710,12 +709,10 @@ static void ni65_free_buffer(struct priv *p)
710 if(p->recv_skb[i]) 709 if(p->recv_skb[i])
711 dev_kfree_skb(p->recv_skb[i]); 710 dev_kfree_skb(p->recv_skb[i]);
712#else 711#else
713 if(p->recvbounce[i]) 712 kfree(p->recvbounce[i]);
714 kfree(p->recvbounce[i]);
715#endif 713#endif
716 } 714 }
717 if(p->self) 715 kfree(p->self);
718 kfree(p->self);
719} 716}
720 717
721 718
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 9f22d138e3ad..818c185d6438 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1020,6 +1020,12 @@ static void set_misc_reg(struct net_device *dev)
1020 } else { 1020 } else {
1021 outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG); 1021 outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG);
1022 } 1022 }
1023 } else if (info->flags & IS_DL10019) {
1024 /* Advertise 100F, 100H, 10F, 10H */
1025 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
1026 /* Restart MII autonegotiation */
1027 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
1028 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
1023 } 1029 }
1024} 1030}
1025 1031
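
The DL10019 branch above programs the PHY through the standard MII register set; the magic numbers decode as follows (constants as in the kernel's mii.h, reproduced here for reference only):

#define MII_BMCR            0           /* Basic Mode Control Register    */
#define MII_ADVERTISE       4           /* Auto-Negotiation Advertisement */
#define BMCR_ANENABLE  0x1000           /* enable autonegotiation         */
#define BMCR_ANRESTART 0x0200           /* restart autonegotiation        */
#define ADVERTISE_CSMA 0x0001           /* IEEE 802.3 selector            */
#define ADVERTISE_ALL  0x01e0           /* 10/100 x half/full duplex      */

So 0x01e1 = ADVERTISE_ALL | ADVERTISE_CSMA advertises all four 10/100 modes, writing 0 to BMCR clears any forced-speed state, and 0x1200 = BMCR_ANENABLE | BMCR_ANRESTART kicks off a fresh negotiation.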
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index ec1a18d189a1..19c2df9c86fe 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1710,10 +1710,8 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1710 error = -EFAULT; 1710 error = -EFAULT;
1711 } 1711 }
1712 wf_out: 1712 wf_out:
1713 if (oldimage) 1713 kfree(oldimage);
1714 kfree(oldimage); 1714 kfree(image);
1715 if (image)
1716 kfree(image);
1717 return error; 1715 return error;
1718 1716
1719 case SIOCRRID: 1717 case SIOCRRID:
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 62d5041845e3..3f5e93aad5c7 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -705,8 +705,7 @@ static void free_shared_mem(struct s2io_nic *nic)
705 } 705 }
706 kfree(mac_control->rings[i].ba[j]); 706 kfree(mac_control->rings[i].ba[j]);
707 } 707 }
708 if (mac_control->rings[i].ba) 708 kfree(mac_control->rings[i].ba);
709 kfree(mac_control->rings[i].ba);
710 } 709 }
711#endif 710#endif
712 711
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index fd0167077fbe..110e777f206e 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -997,10 +997,7 @@ static void __devexit saa9730_remove_one(struct pci_dev *pdev)
997 997
998 if (dev) { 998 if (dev) {
999 unregister_netdev(dev); 999 unregister_netdev(dev);
1000 1000 kfree(dev->priv);
1001 if (dev->priv)
1002 kfree(dev->priv);
1003
1004 free_netdev(dev); 1001 free_netdev(dev);
1005 pci_release_regions(pdev); 1002 pci_release_regions(pdev);
1006 pci_disable_device(pdev); 1003 pci_disable_device(pdev);
@@ -1096,8 +1093,7 @@ static int lan_saa9730_init(struct net_device *dev, int ioaddr, int irq)
1096 return 0; 1093 return 0;
1097 1094
1098 out: 1095 out:
1099 if (dev->priv) 1096 kfree(dev->priv);
1100 kfree(dev->priv);
1101 return ret; 1097 return ret;
1102} 1098}
1103 1099
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 92f75529eff8..478791e09bf7 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -842,7 +842,7 @@ static void sis190_set_rx_mode(struct net_device *dev)
842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
843 i++, mclist = mclist->next) { 843 i++, mclist = mclist->next) {
844 int bit_nr = 844 int bit_nr =
845 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 845 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
847 rx_mode |= AcceptMulticast; 847 rx_mode |= AcceptMulticast;
848 } 848 }
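
The sis190 change swaps which six CRC bits index the 64-bit multicast hash filter: the low bits (& 0x3f) rather than the top bits (>> 26). The indexing itself is unchanged; for a 64-bit filter stored as two 32-bit words:

int bit_nr = ether_crc(ETH_ALEN, addr) & 0x3f;   /* 0..63 */
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);    /* word 0/1, then bit */

Which six bits are correct is purely a property of the chip's hash hardware; the fix implies the SiS190 filters on the low-order bits.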
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 23b713c700b3..1d4d88680db1 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1696,15 +1696,20 @@ static int sis900_rx(struct net_device *net_dev)
1696 long ioaddr = net_dev->base_addr; 1696 long ioaddr = net_dev->base_addr;
1697 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; 1697 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
1698 u32 rx_status = sis_priv->rx_ring[entry].cmdsts; 1698 u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
1699 int rx_work_limit;
1699 1700
1700 if (netif_msg_rx_status(sis_priv)) 1701 if (netif_msg_rx_status(sis_priv))
1701 printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d " 1702 printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
1702 "status:0x%8.8x\n", 1703 "status:0x%8.8x\n",
1703 sis_priv->cur_rx, sis_priv->dirty_rx, rx_status); 1704 sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
1705 rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx;
1704 1706
1705 while (rx_status & OWN) { 1707 while (rx_status & OWN) {
1706 unsigned int rx_size; 1708 unsigned int rx_size;
1707 1709
1710 if (--rx_work_limit < 0)
1711 break;
1712
1708 rx_size = (rx_status & DSIZE) - CRC_SIZE; 1713 rx_size = (rx_status & DSIZE) - CRC_SIZE;
1709 1714
1710 if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) { 1715 if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
@@ -1732,9 +1737,11 @@ static int sis900_rx(struct net_device *net_dev)
1732 we are working on NULL sk_buff :-( */ 1737 we are working on NULL sk_buff :-( */
1733 if (sis_priv->rx_skbuff[entry] == NULL) { 1738 if (sis_priv->rx_skbuff[entry] == NULL) {
1734 if (netif_msg_rx_err(sis_priv)) 1739 if (netif_msg_rx_err(sis_priv))
1735 printk(KERN_INFO "%s: NULL pointer " 1740 printk(KERN_WARNING "%s: NULL pointer "
1736 "encountered in Rx ring, skipping\n", 1741 "encountered in Rx ring\n"
1737 net_dev->name); 1742 "cur_rx:%4.4d, dirty_rx:%4.4d\n",
1743 net_dev->name, sis_priv->cur_rx,
1744 sis_priv->dirty_rx);
1738 break; 1745 break;
1739 } 1746 }
1740 1747
@@ -1770,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
1770 sis_priv->rx_ring[entry].cmdsts = 0; 1777 sis_priv->rx_ring[entry].cmdsts = 0;
1771 sis_priv->rx_ring[entry].bufptr = 0; 1778 sis_priv->rx_ring[entry].bufptr = 0;
1772 sis_priv->stats.rx_dropped++; 1779 sis_priv->stats.rx_dropped++;
1780 sis_priv->cur_rx++;
1773 break; 1781 break;
1774 } 1782 }
1775 skb->dev = net_dev; 1783 skb->dev = net_dev;
@@ -1787,7 +1795,7 @@ static int sis900_rx(struct net_device *net_dev)
1787 1795
1788 /* refill the Rx buffer, what if the rate of refilling is slower 1796 /* refill the Rx buffer, what if the rate of refilling is slower
1789 * than consuming ?? */ 1797 * than consuming ?? */
1790 for (;sis_priv->cur_rx - sis_priv->dirty_rx > 0; sis_priv->dirty_rx++) { 1798 for (; sis_priv->cur_rx != sis_priv->dirty_rx; sis_priv->dirty_rx++) {
1791 struct sk_buff *skb; 1799 struct sk_buff *skb;
1792 1800
1793 entry = sis_priv->dirty_rx % NUM_RX_DESC; 1801 entry = sis_priv->dirty_rx % NUM_RX_DESC;
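
The new rx_work_limit bounds one sis900_rx() call to the number of descriptors the NIC could legitimately still own, so a descriptor left with a stale OWN bit (the NULL-skb case above) can no longer spin the loop forever. The arithmetic, given that cur_rx and dirty_rx are free-running counters with dirty_rx <= cur_rx:

/* descriptors awaiting refill:  cur_rx - dirty_rx
 * descriptors owned by the NIC: NUM_RX_DESC - (cur_rx - dirty_rx)  */
rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx;

/* e.g. NUM_RX_DESC = 16, cur_rx = 103, dirty_rx = 99:
 * limit = 99 + 16 - 103 = 12, exactly the buffers handed to the NIC */

Bumping cur_rx on the dropped-allocation path and refilling while cur_rx != dirty_rx keep those counters consistent with that invariant.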
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 0ddaa611cc61..c573bb351d4c 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1983,6 +1983,10 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
1983 if (lp->version >= (CHIP_91100 << 4)) 1983 if (lp->version >= (CHIP_91100 << 4))
1984 smc_phy_detect(dev); 1984 smc_phy_detect(dev);
1985 1985
1986 /* then shut everything down to save power */
1987 smc_shutdown(dev);
1988 smc_phy_powerdown(dev);
1989
1986 /* Set default parameters */ 1990 /* Set default parameters */
1987 lp->msg_enable = NETIF_MSG_LINK; 1991 lp->msg_enable = NETIF_MSG_LINK;
1988 lp->ctl_rfduplx = 0; 1992 lp->ctl_rfduplx = 0;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index efdb179ecc8c..38b2b0a3ce96 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1091,8 +1091,10 @@ static int netdev_open(struct net_device *dev)
1091 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE; 1091 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
1092 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; 1092 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
1093 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); 1093 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
1094 if (np->queue_mem == 0) 1094 if (np->queue_mem == NULL) {
1095 free_irq(dev->irq, dev);
1095 return -ENOMEM; 1096 return -ENOMEM;
1097 }
1096 1098
1097 np->tx_done_q = np->queue_mem; 1099 np->tx_done_q = np->queue_mem;
1098 np->tx_done_q_dma = np->queue_mem_dma; 1100 np->tx_done_q_dma = np->queue_mem_dma;
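
The starfire fix is an instance of the standard unwind rule: on a failure path, release resources in the reverse order they were acquired. request_irq() succeeded earlier in netdev_open(), so returning -ENOMEM without free_irq() leaked the IRQ. The usual kernel shape for such paths, sketched generically for this era's API:

        err = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
        if (err)
                return err;

        mem = pci_alloc_consistent(pdev, size, &dma);
        if (mem == NULL) {
                err = -ENOMEM;
                goto err_free_irq;      /* undo request_irq(), then fail */
        }
        ...
err_free_irq:
        free_irq(dev->irq, dev);
        return err;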
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 5de0554fd7c6..0ab9c38b4a34 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -80,7 +80,7 @@
80 I/O access could affect performance in ARM-based system 80 I/O access could affect performance in ARM-based system
81 - Add Linux software VLAN support 81 - Add Linux software VLAN support
82 82
83 Version LK1.08 (D-Link): 83 Version LK1.08 (Philippe De Muyter phdm@macqel.be):
84 - Fix bug of custom mac address 84 - Fix bug of custom mac address
85 (StationAddr register only accept word write) 85 (StationAddr register only accept word write)
86 86
@@ -91,11 +91,14 @@
91 Version LK1.09a (ICPlus): 91 Version LK1.09a (ICPlus):
92 - Add the delay time in reading the contents of EEPROM 92 - Add the delay time in reading the contents of EEPROM
93 93
94 Version LK1.10 (Philippe De Muyter phdm@macqel.be):
95 - Make 'unblock interface after Tx underrun' work
96
94*/ 97*/
95 98
96#define DRV_NAME "sundance" 99#define DRV_NAME "sundance"
97#define DRV_VERSION "1.01+LK1.09a" 100#define DRV_VERSION "1.01+LK1.10"
98#define DRV_RELDATE "10-Jul-2003" 101#define DRV_RELDATE "28-Oct-2005"
99 102
100 103
101/* The user-configurable values. 104/* The user-configurable values.
@@ -263,8 +266,10 @@ IV. Notes
263IVb. References 266IVb. References
264 267
265The Sundance ST201 datasheet, preliminary version. 268The Sundance ST201 datasheet, preliminary version.
266http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html 269The Kendin KS8723 datasheet, preliminary version.
267http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html 270The ICplus IP100 datasheet, preliminary version.
271http://www.scyld.com/expert/100mbps.html
272http://www.scyld.com/expert/NWay.html
268 273
269IVc. Errata 274IVc. Errata
270 275
@@ -500,6 +505,25 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500static int netdev_close(struct net_device *dev); 505static int netdev_close(struct net_device *dev);
501static struct ethtool_ops ethtool_ops; 506static struct ethtool_ops ethtool_ops;
502 507
508static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
509{
510 struct netdev_private *np = netdev_priv(dev);
511 void __iomem *ioaddr = np->base + ASICCtrl;
512 int countdown;
513
514 /* ST201 documentation states ASICCtrl is a 32bit register */
515 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
516 /* ST201 documentation states reset can take up to 1 ms */
517 countdown = 10 + 1;
518 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
519 if (--countdown == 0) {
520 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
521 break;
522 }
523 udelay(100);
524 }
525}
526
503static int __devinit sundance_probe1 (struct pci_dev *pdev, 527static int __devinit sundance_probe1 (struct pci_dev *pdev,
504 const struct pci_device_id *ent) 528 const struct pci_device_id *ent)
505{ 529{
@@ -1190,23 +1214,33 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
1190 ("%s: Transmit status is %2.2x.\n", 1214 ("%s: Transmit status is %2.2x.\n",
1191 dev->name, tx_status); 1215 dev->name, tx_status);
1192 if (tx_status & 0x1e) { 1216 if (tx_status & 0x1e) {
1217 if (netif_msg_tx_err(np))
1218 printk("%s: Transmit error status %4.4x.\n",
1219 dev->name, tx_status);
1193 np->stats.tx_errors++; 1220 np->stats.tx_errors++;
1194 if (tx_status & 0x10) 1221 if (tx_status & 0x10)
1195 np->stats.tx_fifo_errors++; 1222 np->stats.tx_fifo_errors++;
1196 if (tx_status & 0x08) 1223 if (tx_status & 0x08)
1197 np->stats.collisions++; 1224 np->stats.collisions++;
1225 if (tx_status & 0x04)
1226 np->stats.tx_fifo_errors++;
1198 if (tx_status & 0x02) 1227 if (tx_status & 0x02)
1199 np->stats.tx_window_errors++; 1228 np->stats.tx_window_errors++;
1200 /* This reset has not been verified!. */ 1229 /*
1201 if (tx_status & 0x10) { /* Reset the Tx. */ 1230 ** This reset has been verified on
1202 np->stats.tx_fifo_errors++; 1231 ** DFE-580TX boards ! phdm@macqel.be.
1203 spin_lock(&np->lock); 1232 */
1204 reset_tx(dev); 1233 if (tx_status & 0x10) { /* TxUnderrun */
1205 spin_unlock(&np->lock); 1234 unsigned short txthreshold;
1235
1236 txthreshold = ioread16 (ioaddr + TxStartThresh);
1237 /* Restart Tx FIFO and transmitter */
1238 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1239 iowrite16 (txthreshold, ioaddr + TxStartThresh);
1240 /* No need to reset the Tx pointer here */
1206 } 1241 }
1207 if (tx_status & 0x1e) /* Restart the Tx. */ 1242 /* Restart the Tx. */
1208 iowrite16 (TxEnable, 1243 iowrite16 (TxEnable, ioaddr + MACCtrl1);
1209 ioaddr + MACCtrl1);
1210 } 1244 }
1211 /* Yup, this is a documentation bug. It cost me *hours*. */ 1245 /* Yup, this is a documentation bug. It cost me *hours*. */
1212 iowrite16 (0, ioaddr + TxStatus); 1246 iowrite16 (0, ioaddr + TxStatus);
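
The Tx-underrun path above is what finally makes recovery work on DFE-580TX boards: the FIFO/Tx reset evidently clobbers TxStartThresh, so the handler saves it, performs the bounded reset, restores it, and then simply re-enables the transmitter. In outline (register names from the diff):

unsigned short txthreshold = ioread16(ioaddr + TxStartThresh);

/* reset Tx FIFO and transmitter; sundance_reset() polls completion
 * for up to roughly 1 ms (10 + 1 iterations of udelay(100)) */
sundance_reset(dev, (NetworkReset | FIFOReset | TxReset) << 16);

iowrite16(txthreshold, ioaddr + TxStartThresh);   /* restore threshold */
iowrite16(TxEnable, ioaddr + MACCtrl1);           /* restart the Tx */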
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 1802c3b48799..1828a6bf8458 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -37,6 +37,7 @@
37#include <linux/tcp.h> 37#include <linux/tcp.h>
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/prefetch.h> 39#include <linux/prefetch.h>
40#include <linux/dma-mapping.h>
40 41
41#include <net/checksum.h> 42#include <net/checksum.h>
42 43
@@ -67,8 +68,8 @@
67 68
68#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
70#define DRV_MODULE_VERSION "3.42" 71#define DRV_MODULE_VERSION "3.43"
71#define DRV_MODULE_RELDATE "Oct 3, 2005" 72#define DRV_MODULE_RELDATE "Oct 24, 2005"
72 73
73#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -219,6 +220,10 @@ static struct pci_device_id tg3_pci_tbl[] = {
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, 221 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, 227 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, 229 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
@@ -466,6 +471,15 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
466 spin_unlock_irqrestore(&tp->indirect_lock, flags); 471 spin_unlock_irqrestore(&tp->indirect_lock, flags);
467} 472}
468 473
474static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
475{
476 /* If no workaround is needed, write to mem space directly */
477 if (tp->write32 != tg3_write_indirect_reg32)
478 tw32(NIC_SRAM_WIN_BASE + off, val);
479 else
480 tg3_write_mem(tp, off, val);
481}
482
469static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) 483static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
470{ 484{
471 unsigned long flags; 485 unsigned long flags;
@@ -570,7 +584,7 @@ static void tg3_switch_clocks(struct tg3 *tp)
570 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); 584 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
571 u32 orig_clock_ctrl; 585 u32 orig_clock_ctrl;
572 586
573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 587 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
574 return; 588 return;
575 589
576 orig_clock_ctrl = clock_ctrl; 590 orig_clock_ctrl = clock_ctrl;
@@ -1210,7 +1224,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1210 CLOCK_CTRL_ALTCLK | 1224 CLOCK_CTRL_ALTCLK |
1211 CLOCK_CTRL_PWRDOWN_PLL133); 1225 CLOCK_CTRL_PWRDOWN_PLL133);
1212 udelay(40); 1226 udelay(40);
1213 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 1227 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1214 /* do nothing */ 1228 /* do nothing */
1215 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 1229 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1216 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { 1230 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
@@ -3712,14 +3726,14 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3712 dev->mtu = new_mtu; 3726 dev->mtu = new_mtu;
3713 3727
3714 if (new_mtu > ETH_DATA_LEN) { 3728 if (new_mtu > ETH_DATA_LEN) {
3715 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 3729 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3716 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 3730 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3717 ethtool_op_set_tso(dev, 0); 3731 ethtool_op_set_tso(dev, 0);
3718 } 3732 }
3719 else 3733 else
3720 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 3734 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3721 } else { 3735 } else {
3722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 3736 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3723 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 3737 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3724 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; 3738 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3725 } 3739 }
@@ -3850,7 +3864,7 @@ static void tg3_init_rings(struct tg3 *tp)
3850 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); 3864 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3851 3865
3852 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; 3866 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3853 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) && 3867 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3854 (tp->dev->mtu > ETH_DATA_LEN)) 3868 (tp->dev->mtu > ETH_DATA_LEN))
3855 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; 3869 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3856 3870
@@ -3905,10 +3919,8 @@ static void tg3_init_rings(struct tg3 *tp)
3905 */ 3919 */
3906static void tg3_free_consistent(struct tg3 *tp) 3920static void tg3_free_consistent(struct tg3 *tp)
3907{ 3921{
3908 if (tp->rx_std_buffers) { 3922 kfree(tp->rx_std_buffers);
3909 kfree(tp->rx_std_buffers); 3923 tp->rx_std_buffers = NULL;
3910 tp->rx_std_buffers = NULL;
3911 }
3912 if (tp->rx_std) { 3924 if (tp->rx_std) {
3913 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, 3925 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3914 tp->rx_std, tp->rx_std_mapping); 3926 tp->rx_std, tp->rx_std_mapping);
@@ -4347,7 +4359,7 @@ static int tg3_chip_reset(struct tg3 *tp)
4347 val &= ~PCIX_CAPS_RELAXED_ORDERING; 4359 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4348 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); 4360 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4349 4361
4350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 4362 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4351 u32 val; 4363 u32 val;
4352 4364
4353 /* Chip reset on 5780 will reset MSI enable bit, 4365 /* Chip reset on 5780 will reset MSI enable bit,
@@ -6003,7 +6015,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6003 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 6015 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6004 6016
6005 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 6017 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6006 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)) 6018 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6007 limit = 8; 6019 limit = 8;
6008 else 6020 else
6009 limit = 16; 6021 limit = 16;
@@ -6191,14 +6203,16 @@ static void tg3_timer(unsigned long __opaque)
6191 tp->timer_counter = tp->timer_multiplier; 6203 tp->timer_counter = tp->timer_multiplier;
6192 } 6204 }
6193 6205
6194 /* Heartbeat is only sent once every 120 seconds. */ 6206 /* Heartbeat is only sent once every 2 seconds. */
6195 if (!--tp->asf_counter) { 6207 if (!--tp->asf_counter) {
6196 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 6208 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6197 u32 val; 6209 u32 val;
6198 6210
6199 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE); 6211 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6200 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 6212 FWCMD_NICDRV_ALIVE2);
6201 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3); 6213 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6214 /* 5 seconds timeout */
6215 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6202 val = tr32(GRC_RX_CPU_EVENT); 6216 val = tr32(GRC_RX_CPU_EVENT);
6203 val |= (1 << 14); 6217 val |= (1 << 14);
6204 tw32(GRC_RX_CPU_EVENT, val); 6218 tw32(GRC_RX_CPU_EVENT, val);
@@ -6409,7 +6423,7 @@ static int tg3_open(struct net_device *dev)
6409 tp->timer_counter = tp->timer_multiplier = 6423 tp->timer_counter = tp->timer_multiplier =
6410 (HZ / tp->timer_offset); 6424 (HZ / tp->timer_offset);
6411 tp->asf_counter = tp->asf_multiplier = 6425 tp->asf_counter = tp->asf_multiplier =
6412 ((HZ / tp->timer_offset) * 120); 6426 ((HZ / tp->timer_offset) * 2);
6413 6427
6414 init_timer(&tp->timer); 6428 init_timer(&tp->timer);
6415 tp->timer.expires = jiffies + tp->timer_offset; 6429 tp->timer.expires = jiffies + tp->timer_offset;
@@ -7237,7 +7251,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7237 cmd->supported |= (SUPPORTED_1000baseT_Half | 7251 cmd->supported |= (SUPPORTED_1000baseT_Half |
7238 SUPPORTED_1000baseT_Full); 7252 SUPPORTED_1000baseT_Full);
7239 7253
7240 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) 7254 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7241 cmd->supported |= (SUPPORTED_100baseT_Half | 7255 cmd->supported |= (SUPPORTED_100baseT_Half |
7242 SUPPORTED_100baseT_Full | 7256 SUPPORTED_100baseT_Full |
7243 SUPPORTED_10baseT_Half | 7257 SUPPORTED_10baseT_Half |
@@ -7264,7 +7278,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7264{ 7278{
7265 struct tg3 *tp = netdev_priv(dev); 7279 struct tg3 *tp = netdev_priv(dev);
7266 7280
7267 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 7281 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7268 /* These are the only valid advertisement bits allowed. */ 7282 /* These are the only valid advertisement bits allowed. */
7269 if (cmd->autoneg == AUTONEG_ENABLE && 7283 if (cmd->autoneg == AUTONEG_ENABLE &&
7270 (cmd->advertising & ~(ADVERTISED_1000baseT_Half | 7284 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
@@ -7272,7 +7286,17 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7272 ADVERTISED_Autoneg | 7286 ADVERTISED_Autoneg |
7273 ADVERTISED_FIBRE))) 7287 ADVERTISED_FIBRE)))
7274 return -EINVAL; 7288 return -EINVAL;
7275 } 7289 /* Fiber can only do SPEED_1000. */
7290 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7291 (cmd->speed != SPEED_1000))
7292 return -EINVAL;
7293 /* Copper cannot force SPEED_1000. */
7294 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7295 (cmd->speed == SPEED_1000))
7296 return -EINVAL;
7297 else if ((cmd->speed == SPEED_1000) &&
7298 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7299 return -EINVAL;
7276 7300
7277 tg3_full_lock(tp, 0); 7301 tg3_full_lock(tp, 0);
7278 7302
@@ -8380,7 +8404,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8380 } 8404 }
8381 8405
8382 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || 8406 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8383 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)) { 8407 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8384 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 8408 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8385 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 8409 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8386 tp->nvram_jedecnum = JEDEC_ATMEL; 8410 tp->nvram_jedecnum = JEDEC_ATMEL;
@@ -8980,7 +9004,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8980 9004
8981 tp->phy_id = eeprom_phy_id; 9005 tp->phy_id = eeprom_phy_id;
8982 if (eeprom_phy_serdes) { 9006 if (eeprom_phy_serdes) {
8983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9007 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
8984 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; 9008 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8985 else 9009 else
8986 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 9010 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
@@ -9393,8 +9417,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9393 } 9417 }
9394 9418
9395 /* Find msi capability. */ 9419 /* Find msi capability. */
9396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9420 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9422 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9397 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 9423 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9424 }
9398 9425
9399 /* Initialize misc host control in PCI block. */ 9426 /* Initialize misc host control in PCI block. */
9400 tp->misc_host_ctrl |= (misc_ctrl_reg & 9427 tp->misc_host_ctrl |= (misc_ctrl_reg &
@@ -9412,7 +9439,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9412 9439
9413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 9440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9414 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 9441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9415 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9442 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9416 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 9443 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9417 9444
9418 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || 9445 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
@@ -9607,7 +9634,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9607 * ether_setup() via the alloc_etherdev() call 9634 * ether_setup() via the alloc_etherdev() call
9608 */ 9635 */
9609 if (tp->dev->mtu > ETH_DATA_LEN && 9636 if (tp->dev->mtu > ETH_DATA_LEN &&
9610 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780) 9637 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9611 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 9638 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9612 9639
9613 /* Determine WakeOnLan speed to use. */ 9640 /* Determine WakeOnLan speed to use. */
@@ -9830,7 +9857,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
9830 mac_offset = 0x7c; 9857 mac_offset = 0x7c;
9831 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 9858 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9832 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) || 9859 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 9860 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9834 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 9861 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9835 mac_offset = 0xcc; 9862 mac_offset = 0xcc;
9836 if (tg3_nvram_lock(tp)) 9863 if (tg3_nvram_lock(tp))
@@ -10148,6 +10175,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
10148 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 10175 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10149 /* 5780 always in PCIX mode */ 10176 /* 5780 always in PCIX mode */
10150 tp->dma_rwctrl |= 0x00144000; 10177 tp->dma_rwctrl |= 0x00144000;
10178 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10179 /* 5714 always in PCIX mode */
10180 tp->dma_rwctrl |= 0x00148000;
10151 } else { 10181 } else {
10152 tp->dma_rwctrl |= 0x001b000f; 10182 tp->dma_rwctrl |= 0x001b000f;
10153 } 10183 }
@@ -10347,6 +10377,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
10347 case PHY_ID_BCM5705: return "5705"; 10377 case PHY_ID_BCM5705: return "5705";
10348 case PHY_ID_BCM5750: return "5750"; 10378 case PHY_ID_BCM5750: return "5750";
10349 case PHY_ID_BCM5752: return "5752"; 10379 case PHY_ID_BCM5752: return "5752";
10380 case PHY_ID_BCM5714: return "5714";
10350 case PHY_ID_BCM5780: return "5780"; 10381 case PHY_ID_BCM5780: return "5780";
10351 case PHY_ID_BCM8002: return "8002/serdes"; 10382 case PHY_ID_BCM8002: return "8002/serdes";
10352 case 0: return "serdes"; 10383 case 0: return "serdes";
@@ -10492,17 +10523,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10492 } 10523 }
10493 10524
10494 /* Configure DMA attributes. */ 10525 /* Configure DMA attributes. */
10495 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL); 10526 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
10496 if (!err) { 10527 if (!err) {
10497 pci_using_dac = 1; 10528 pci_using_dac = 1;
10498 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); 10529 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
10499 if (err < 0) { 10530 if (err < 0) {
10500 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA " 10531 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10501 "for consistent allocations\n"); 10532 "for consistent allocations\n");
10502 goto err_out_free_res; 10533 goto err_out_free_res;
10503 } 10534 }
10504 } else { 10535 } else {
10505 err = pci_set_dma_mask(pdev, 0xffffffffULL); 10536 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10506 if (err) { 10537 if (err) {
10507 printk(KERN_ERR PFX "No usable DMA configuration, " 10538 printk(KERN_ERR PFX "No usable DMA configuration, "
10508 "aborting.\n"); 10539 "aborting.\n");
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 2e733c60bfa4..fb7e2a5f4a08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -137,6 +137,7 @@
137#define ASIC_REV_5750 0x04 137#define ASIC_REV_5750 0x04
138#define ASIC_REV_5752 0x06 138#define ASIC_REV_5752 0x06
139#define ASIC_REV_5780 0x08 139#define ASIC_REV_5780 0x08
140#define ASIC_REV_5714 0x09
140#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 141#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
141#define CHIPREV_5700_AX 0x70 142#define CHIPREV_5700_AX 0x70
142#define CHIPREV_5700_BX 0x71 143#define CHIPREV_5700_BX 0x71
@@ -531,6 +532,8 @@
531#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 532#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
532#define MAC_SERDES_STAT 0x00000594 533#define MAC_SERDES_STAT 0x00000594
533/* 0x598 --> 0x5b0 unused */ 534/* 0x598 --> 0x5b0 unused */
535#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */
536#define SERDES_RX_SIG_DETECT 0x00000400
534#define SG_DIG_CTRL 0x000005b0 537#define SG_DIG_CTRL 0x000005b0
535#define SG_DIG_USING_HW_AUTONEG 0x80000000 538#define SG_DIG_USING_HW_AUTONEG 0x80000000
536#define SG_DIG_SOFT_RESET 0x40000000 539#define SG_DIG_SOFT_RESET 0x40000000
@@ -1329,6 +1332,8 @@
 #define  GRC_LCLCTRL_CLEARINT		0x00000002
 #define  GRC_LCLCTRL_SETINT		0x00000004
 #define  GRC_LCLCTRL_INT_ON_ATTN	0x00000008
+#define  GRC_LCLCTRL_USE_SIG_DETECT	0x00000010	/* 5714/5780 only */
+#define  GRC_LCLCTRL_USE_EXT_SIG_DETECT	0x00000020	/* 5714/5780 only */
 #define  GRC_LCLCTRL_GPIO_INPUT3	0x00000020
 #define  GRC_LCLCTRL_GPIO_OE3		0x00000040
 #define  GRC_LCLCTRL_GPIO_OUTPUT3	0x00000080
@@ -1507,6 +1512,7 @@
 #define  FWCMD_NICDRV_IPV6ADDR_CHG	 0x00000004
 #define  FWCMD_NICDRV_FIX_DMAR		 0x00000005
 #define  FWCMD_NICDRV_FIX_DMAW		 0x00000006
+#define  FWCMD_NICDRV_ALIVE2		 0x0000000d
 #define NIC_SRAM_FW_CMD_LEN_MBOX	0x00000b7c
 #define NIC_SRAM_FW_CMD_DATA_MBOX	0x00000b80
 #define NIC_SRAM_FW_ASF_STATUS_MBOX	0x00000c00
@@ -2175,6 +2181,7 @@ struct tg3 {
 					 TG3_FLG2_MII_SERDES)
 #define TG3_FLG2_PARALLEL_DETECT	0x01000000
 #define TG3_FLG2_ICH_WORKAROUND		0x02000000
+#define TG3_FLG2_5780_CLASS		0x04000000
 
 	u32				split_mode_max_reqs;
 #define SPLIT_MODE_5704_MAX_REQ		3
@@ -2222,6 +2229,7 @@ struct tg3 {
 #define PHY_ID_BCM5705			0x600081a0
 #define PHY_ID_BCM5750			0x60008180
 #define PHY_ID_BCM5752			0x60008100
+#define PHY_ID_BCM5714			0x60008340
 #define PHY_ID_BCM5780			0x60008350
 #define PHY_ID_BCM8002			0x60010140
 #define PHY_ID_INVALID			0xffffffff
@@ -2246,8 +2254,8 @@ struct tg3 {
 	 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
 	 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
 	 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
-	 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5780 || \
-	 (X) == PHY_ID_BCM8002)
+	 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
+	 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002)
 
 	struct tg3_hw_stats		*hw_stats;
 	dma_addr_t			stats_mapping;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 6b8eee8f7bfd..d7fb3ffe06ac 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2076,8 +2076,7 @@ static int __init de_init_one (struct pci_dev *pdev,
 	return 0;
 
 err_out_iomap:
-	if (de->ee_data)
-		kfree(de->ee_data);
+	kfree(de->ee_data);
 	iounmap(regs);
 err_out_res:
 	pci_release_regions(pdev);
@@ -2096,8 +2095,7 @@ static void __exit de_remove_one (struct pci_dev *pdev)
 	if (!dev)
 		BUG();
 	unregister_netdev(dev);
-	if (de->ee_data)
-		kfree(de->ee_data);
+	kfree(de->ee_data);
 	iounmap(de->regs);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
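
These de2104x hunks set the pattern for most of this series: kfree(NULL) is defined to be a no-op, so the guarding NULL test only adds a redundant branch. A self-contained illustration (both helper names are hypothetical):

#include <linux/slab.h>

/* Equivalent cleanups: the explicit test buys nothing. */
static void cleanup_verbose(char *buf)
{
	if (buf)		/* redundant: kfree(NULL) is a no-op */
		kfree(buf);
}

static void cleanup_terse(char *buf)
{
	kfree(buf);		/* safe even when buf == NULL */
}
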
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 6266a9a7e6e3..125ed00e95a5 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1727,8 +1727,7 @@ err_out_free_ring:
 			     tp->rx_ring, tp->rx_ring_dma);
 
 err_out_mtable:
-	if (tp->mtable)
-		kfree (tp->mtable);
+	kfree (tp->mtable);
 	pci_iounmap(pdev, ioaddr);
 
 err_out_free_res:
@@ -1806,8 +1805,7 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
 			     tp->rx_ring, tp->rx_ring_dma);
-	if (tp->mtable)
-		kfree (tp->mtable);
+	kfree (tp->mtable);
 	pci_iounmap(pdev, tp->base_addr);
 	free_netdev (dev);
 	pci_release_regions (pdev);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index abc5cee6eedc..a368d08e7d19 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1212,10 +1212,8 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
 				velocity_free_td_ring_entry(vptr, j, i);
 
 		}
-		if (vptr->td_infos[j]) {
-			kfree(vptr->td_infos[j]);
-			vptr->td_infos[j] = NULL;
-		}
+		kfree(vptr->td_infos[j]);
+		vptr->td_infos[j] = NULL;
 	}
 }
 
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index cb429e783749..4c11699bad91 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2381,14 +2381,10 @@ void stop_airo_card( struct net_device *dev, int freeres )
 		dev_kfree_skb(skb);
 	}
 
-	if (ai->flash)
-		kfree(ai->flash);
-	if (ai->rssi)
-		kfree(ai->rssi);
-	if (ai->APList)
-		kfree(ai->APList);
-	if (ai->SSID)
-		kfree(ai->SSID);
+	kfree(ai->flash);
+	kfree(ai->rssi);
+	kfree(ai->APList);
+	kfree(ai->SSID);
 	if (freeres) {
 		/* PCMCIA frees this stuff, so only for PCI and ISA */
 		release_region( dev->base_addr, 64 );
@@ -3626,10 +3622,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
 	int rc;
 
 	memset( &mySsid, 0, sizeof( mySsid ) );
-	if (ai->flash) {
-		kfree (ai->flash);
-		ai->flash = NULL;
-	}
+	kfree (ai->flash);
+	ai->flash = NULL;
 
 	/* The NOP is the first step in getting the card going */
 	cmd.cmd = NOP;
@@ -3666,14 +3660,10 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
 	tdsRssiRid rssi_rid;
 	CapabilityRid cap_rid;
 
-	if (ai->APList) {
-		kfree(ai->APList);
-		ai->APList = NULL;
-	}
-	if (ai->SSID) {
-		kfree(ai->SSID);
-		ai->SSID = NULL;
-	}
+	kfree(ai->APList);
+	ai->APList = NULL;
+	kfree(ai->SSID);
+	ai->SSID = NULL;
 	// general configuration (read/modify/write)
 	status = readConfigRid(ai, lock);
 	if ( status != SUCCESS ) return ERROR;
@@ -3687,10 +3677,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
 		memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
 	}
 	else {
-		if (ai->rssi) {
-			kfree(ai->rssi);
-			ai->rssi = NULL;
-		}
+		kfree(ai->rssi);
+		ai->rssi = NULL;
 		if (cap_rid.softCap & 8)
 			ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
 		else
@@ -5369,11 +5357,13 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
 
 static int proc_close( struct inode *inode, struct file *file )
 {
-	struct proc_data *data = (struct proc_data *)file->private_data;
-	if ( data->on_close != NULL ) data->on_close( inode, file );
-	if ( data->rbuffer ) kfree( data->rbuffer );
-	if ( data->wbuffer ) kfree( data->wbuffer );
-	kfree( data );
+	struct proc_data *data = file->private_data;
+
+	if (data->on_close != NULL)
+		data->on_close(inode, file);
+	kfree(data->rbuffer);
+	kfree(data->wbuffer);
+	kfree(data);
 	return 0;
 }
 
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index bf25584d68d3..784de9109113 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -258,9 +258,7 @@ static void airo_detach(dev_link_t *link)
 
 	/* Unlink device structure, free pieces */
 	*linkp = link->next;
-	if (link->priv) {
-		kfree(link->priv);
-	}
+	kfree(link->priv);
 	kfree(link);
 
 } /* airo_detach */
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index d57011028b72..1fbe027d26b6 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1653,8 +1653,7 @@ void stop_atmel_card(struct net_device *dev, int freeres)
 	unregister_netdev(dev);
 	remove_proc_entry("driver/atmel", NULL);
 	free_irq(dev->irq, dev);
-	if (priv->firmware)
-		kfree(priv->firmware);
+	kfree(priv->firmware);
 	if (freeres) {
 		/* PCMCIA frees this stuff, so only for PCI */
 		release_region(dev->base_addr, 64);
@@ -2450,8 +2449,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 			break;
 		}
 
-		if (priv->firmware)
-			kfree(priv->firmware);
+		kfree(priv->firmware);
 
 		priv->firmware = new_firmware;
 		priv->firmware_length = com.len;
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index ff031a3985b3..195cb36619e8 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -259,8 +259,7 @@ static void atmel_detach(dev_link_t *link)
 
 	/* Unlink device structure, free pieces */
 	*linkp = link->next;
-	if (link->priv)
-		kfree(link->priv);
+	kfree(link->priv);
 	kfree(link);
 }
 
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index eba0d9d2b7c5..579480dad374 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -444,6 +444,43 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
 	return err;
 }
 
+/* Write a block of data to the chip's buffer with padding if
+ * necessary, via the BAP. Synchronization/serialization is the
+ * caller's problem. len must be even.
+ *
+ * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
+ */
+int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf, unsigned data_len, unsigned len,
+			  u16 id, u16 offset)
+{
+	int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
+	int err = 0;
+
+	if (len < 0 || len % 2 || data_len > len)
+		return -EINVAL;
+
+	err = hermes_bap_seek(hw, bap, id, offset);
+	if (err)
+		goto out;
+
+	/* Transfer all the complete words of data */
+	hermes_write_words(hw, dreg, buf, data_len/2);
+	/* If there is an odd byte left over pad and transfer it */
+	if (data_len & 1) {
+		u8 end[2];
+		end[1] = 0;
+		end[0] = ((unsigned char *)buf)[data_len - 1];
+		hermes_write_words(hw, dreg, end, 1);
+		data_len++;
+	}
+	/* Now send zeros for the padding */
+	if (data_len < len)
+		hermes_clear_words(hw, dreg, (len - data_len) / 2);
+	/* Complete */
+ out:
+	return err;
+}
+
 /* Read a Length-Type-Value record from the card.
  *
  * If length is NULL, we ignore the length read from the card, and
@@ -531,6 +568,7 @@ EXPORT_SYMBOL(hermes_allocate);
 
 EXPORT_SYMBOL(hermes_bap_pread);
 EXPORT_SYMBOL(hermes_bap_pwrite);
+EXPORT_SYMBOL(hermes_bap_pwrite_pad);
 EXPORT_SYMBOL(hermes_read_ltv);
 EXPORT_SYMBOL(hermes_write_ltv);
 
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index ad28e3294360..a6bd472d75d4 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -376,6 +376,8 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
 		     u16 id, u16 offset);
 int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
 		      u16 id, u16 offset);
+int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf,
+			  unsigned data_len, unsigned len, u16 id, u16 offset);
 int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
 		    u16 *length, void *buf);
 int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
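
In the new helper's contract, data_len is the number of real payload bytes and len is the total (even) record length; the helper writes the odd trailing byte and the zero padding itself. A hedged usage sketch; everything except the hermes_bap_pwrite_pad() prototype above is invented for illustration:

/* Hypothetical caller: write a payload padded out to min_len bytes. */
static int example_send_padded(hermes_t *hw, u16 txfid, const void *payload,
			       unsigned data_len, unsigned min_len, u16 offset)
{
	unsigned len = ALIGN(data_len, 2);	/* total length must be even */

	if (len < min_len)
		len = min_len;		/* e.g. ETH_ZLEN for 802.3 frames */
	return hermes_bap_pwrite_pad(hw, 0 /* BAP0 */, payload,
				     data_len, len, txfid, offset);
}
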
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 53f5246c40aa..2617d70bcda9 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -552,7 +552,6 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
 
 	kfree(addr);
 	kfree(qual);
-
 	return 0;
 }
 
@@ -3081,9 +3080,7 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
 	ret = local->func->download(local, param);
 
  out:
-	if (param != NULL)
-		kfree(param);
-
+	kfree(param);
 	return ret;
 }
 #endif /* PRISM2_DOWNLOAD_SUPPORT */
@@ -3890,9 +3887,7 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
 	}
 
  out:
-	if (param != NULL)
-		kfree(param);
-
+	kfree(param);
 	return ret;
 }
 
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index de4e6c23e4b8..3db0c32afe82 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4030,6 +4030,10 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
 	int i;
 
 	rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL);
+	if (unlikely(!rxq)) {
+		IPW_ERROR("memory allocation failed\n");
+		return NULL;
+	}
 	memset(rxq, 0, sizeof(*rxq));
 	spin_lock_init(&rxq->lock);
 	INIT_LIST_HEAD(&rxq->rx_free);
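
The ipw2200 hunk closes an unchecked kmalloc(): allocate, check, then zero. A minimal sketch of the same shape with a made-up type:

#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/string.h>

struct example_queue {
	int count;
};

/* Allocate and zero one queue; return NULL cleanly if memory is tight. */
static struct example_queue *example_queue_alloc(void)
{
	struct example_queue *q = kmalloc(sizeof(*q), GFP_KERNEL);

	if (unlikely(!q))
		return NULL;	/* caller must cope with the failure */
	memset(q, 0, sizeof(*q));
	return q;
}
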
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d3d4ec9e242e..488ab06fb79f 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -490,7 +490,8 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	/* Check packet length, pad short packets, round up odd length */
+	/* Length of the packet body */
+	/* FIXME: what if the skb is smaller than this? */
 	len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
 	skb = skb_padto(skb, len);
 	if (skb == NULL)
@@ -541,13 +542,21 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
 			stats->tx_errors++;
 			goto fail;
 		}
+		/* Actual xfer length - allow for padding */
+		len = ALIGN(data_len, 2);
+		if (len < ETH_ZLEN - ETH_HLEN)
+			len = ETH_ZLEN - ETH_HLEN;
 	} else { /* IEEE 802.3 frame */
 		data_len = len + ETH_HLEN;
 		data_off = HERMES_802_3_OFFSET;
 		p = skb->data;
+		/* Actual xfer length - round up for odd length packets */
+		len = ALIGN(data_len, 2);
+		if (len < ETH_ZLEN)
+			len = ETH_ZLEN;
 	}
 
-	err = hermes_bap_pwrite(hw, USER_BAP, p, data_len,
+	err = hermes_bap_pwrite_pad(hw, USER_BAP, p, data_len, len,
 				txfid, data_off);
 	if (err) {
 		printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
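
Worked through for the 802.3 branch above: a 13-byte payload gives data_len = 13 + ETH_HLEN = 27; ALIGN(27, 2) = 28 rounds up to an even transfer; max(28, ETH_ZLEN) = 60 enforces the minimum frame. hermes_bap_pwrite_pad() then copies 27 real bytes, pads the odd byte to reach 28, and clears the remaining 32 bytes on the card (ETH_HLEN = 14 and ETH_ZLEN = 60 per <linux/if_ether.h>).
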
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 6c9584a9f284..78bdb359835e 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -754,8 +754,7 @@ islpci_free_memory(islpci_private *priv)
 			pci_unmap_single(priv->pdev, buf->pci_addr,
 					 buf->size, PCI_DMA_FROMDEVICE);
 		buf->pci_addr = 0;
-		if (buf->mem)
-			kfree(buf->mem);
+		kfree(buf->mem);
 		buf->size = 0;
 		buf->mem = NULL;
 	}
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 5952e9960499..3b49efa37ee5 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -97,12 +97,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 	/* lock the driver code */
 	spin_lock_irqsave(&priv->slock, flags);
 
-	/* determine the amount of fragments needed to store the frame */
-
-	frame_size = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
-	if (init_wds)
-		frame_size += 6;
-
 	/* check whether the destination queue has enough fragments for the frame */
 	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
 	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
@@ -213,6 +207,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 	/* store the skb address for future freeing */
 	priv->data_low_tx[index] = skb;
 	/* set the proper fragment start address and size information */
+	frame_size = skb->len;
 	fragment->size = cpu_to_le16(frame_size);
 	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
 	fragment->address = cpu_to_le32(pci_map_address);
@@ -246,12 +241,10 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 
  drop_free:
-	/* free the skbuf structure before aborting */
-	dev_kfree_skb(skb);
-	skb = NULL;
-
 	priv->statistics.tx_dropped++;
 	spin_unlock_irqrestore(&priv->slock, flags);
+	dev_kfree_skb(skb);
+	skb = NULL;
 	return err;
 }
 
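
The drop_free change does more than reorder lines: freeing the skb is deferred until after spin_unlock_irqrestore(), keeping that work out of the locked, interrupts-off region. A self-contained sketch of the resulting shape (all names invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Account the drop under the lock; free the skb only after the
 * IRQ-saving lock is released and interrupts are restored. */
static int example_drop_path(spinlock_t *lock,
			     struct net_device_stats *stats,
			     struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	stats->tx_dropped++;
	spin_unlock_irqrestore(lock, flags);
	dev_kfree_skb(skb);
	return 0;
}
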
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 12123e24b113..eea2f04c8c6d 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -268,11 +268,10 @@ mgt_clean(islpci_private *priv)
 
 	if (!priv->mib)
 		return;
-	for (i = 0; i < OID_NUM_LAST; i++)
-		if (priv->mib[i]) {
-			kfree(priv->mib[i]);
-			priv->mib[i] = NULL;
-		}
+	for (i = 0; i < OID_NUM_LAST; i++) {
+		kfree(priv->mib[i]);
+		priv->mib[i] = NULL;
+	}
 	kfree(priv->mib);
 	priv->mib = NULL;
 }
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 7bc7fc823128..d25264ba0c0e 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -860,12 +860,9 @@ static int allocate_buffers(struct strip *strip_info, int mtu)
 		strip_info->mtu = dev->mtu = mtu;
 		return (1);
 	}
-	if (r)
-		kfree(r);
-	if (s)
-		kfree(s);
-	if (t)
-		kfree(t);
+	kfree(r);
+	kfree(s);
+	kfree(t);
 	return (0);
 }
 
@@ -922,13 +919,9 @@ static int strip_change_mtu(struct net_device *dev, int new_mtu)
 	printk(KERN_NOTICE "%s: strip MTU changed fom %d to %d.\n",
 	       strip_info->dev->name, old_mtu, strip_info->mtu);
 
-	if (orbuff)
-		kfree(orbuff);
-	if (osbuff)
-		kfree(osbuff);
-	if (otbuff)
-		kfree(otbuff);
-
+	kfree(orbuff);
+	kfree(osbuff);
+	kfree(otbuff);
 	return 0;
 }
 
@@ -2498,18 +2491,13 @@ static int strip_close_low(struct net_device *dev)
 	/*
 	 * Free all STRIP frame buffers.
 	 */
-	if (strip_info->rx_buff) {
-		kfree(strip_info->rx_buff);
-		strip_info->rx_buff = NULL;
-	}
-	if (strip_info->sx_buff) {
-		kfree(strip_info->sx_buff);
-		strip_info->sx_buff = NULL;
-	}
-	if (strip_info->tx_buff) {
-		kfree(strip_info->tx_buff);
-		strip_info->tx_buff = NULL;
-	}
+	kfree(strip_info->rx_buff);
+	strip_info->rx_buff = NULL;
+	kfree(strip_info->sx_buff);
+	strip_info->sx_buff = NULL;
+	kfree(strip_info->tx_buff);
+	strip_info->tx_buff = NULL;
+
 	del_timer(&strip_info->idle_timer);
 	return 0;
 }
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
new file mode 100644
index 000000000000..bef23bbf8690
--- /dev/null
+++ b/include/linux/fs_enet_pd.h
@@ -0,0 +1,136 @@
+/*
+ * Platform information definitions for the
+ * universal Freescale Ethernet driver.
+ *
+ * Copyright (c) 2003 Intracom S.A.
+ *  by Pantelis Antoniou <panto@intracom.gr>
+ *
+ * 2005 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef FS_ENET_PD_H
+#define FS_ENET_PD_H
+
+#include <linux/version.h>
+#include <asm/types.h>
+
+#define FS_ENET_NAME	"fs_enet"
+
+enum fs_id {
+	fsid_fec1,
+	fsid_fec2,
+	fsid_fcc1,
+	fsid_fcc2,
+	fsid_fcc3,
+	fsid_scc1,
+	fsid_scc2,
+	fsid_scc3,
+	fsid_scc4,
+};
+
+#define FS_MAX_INDEX	9
+
+static inline int fs_get_fec_index(enum fs_id id)
+{
+	if (id >= fsid_fec1 && id <= fsid_fec2)
+		return id - fsid_fec1;
+	return -1;
+}
+
+static inline int fs_get_fcc_index(enum fs_id id)
+{
+	if (id >= fsid_fcc1 && id <= fsid_fcc3)
+		return id - fsid_fcc1;
+	return -1;
+}
+
+static inline int fs_get_scc_index(enum fs_id id)
+{
+	if (id >= fsid_scc1 && id <= fsid_scc4)
+		return id - fsid_scc1;
+	return -1;
+}
+
+enum fs_mii_method {
+	fsmii_fixed,
+	fsmii_fec,
+	fsmii_bitbang,
+};
+
+enum fs_ioport {
+	fsiop_porta,
+	fsiop_portb,
+	fsiop_portc,
+	fsiop_portd,
+	fsiop_porte,
+};
+
+struct fs_mii_bus_info {
+	int method;		/* mii method */
+	int id;			/* the id of the mii_bus */
+	int disable_aneg;	/* if the controller needs to negotiate speed & duplex */
+	int lpa;		/* the default board-specific values are applied otherwise */
+
+	union {
+		struct {
+			int duplex;
+			int speed;
+		} fixed;
+
+		struct {
+			/* nothing */
+		} fec;
+
+		struct {
+			/* nothing */
+		} scc;
+
+		struct {
+			int mdio_port;	/* port & bit for MDIO */
+			int mdio_bit;
+			int mdc_port;	/* port & bit for MDC */
+			int mdc_bit;
+			int delay;	/* delay in us */
+		} bitbang;
+	} i;
+};
+
+struct fs_platform_info {
+
+	void (*init_ioports)(void);
+	/* device specific information */
+	int fs_no;		/* controller index */
+
+	u32 cp_page;		/* CPM page */
+	u32 cp_block;		/* CPM sblock */
+
+	u32 clk_trx;		/* some stuff for pins & mux configuration */
+	u32 clk_route;
+	u32 clk_mask;
+
+	u32 mem_offset;
+	u32 dpram_offset;
+	u32 fcc_regs_c;
+
+	u32 device_flags;
+
+	int phy_addr;		/* the phy address (-1 no phy) */
+	int phy_irq;		/* the phy irq (if it exists) */
+
+	const struct fs_mii_bus_info *bus_info;
+
+	int rx_ring, tx_ring;	/* number of rx & tx buffers */
+	__u8 macaddr[6];	/* mac address */
+	int rx_copybreak;	/* limit we copy small frames */
+	int use_napi;		/* use NAPI */
+	int napi_weight;	/* NAPI weight */
+
+	int use_rmii;		/* use RMII mode */
+};
+
+#endif
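
A hedged sketch of how a board file might instantiate this platform data for a FEC port on a fixed 100/full link; every name and value below is a made-up example, not taken from any real board support code:

#include <linux/fs_enet_pd.h>

/* Hypothetical wiring: FEC1 with no real PHY (phy_addr == -1), so the
 * fixed-link speed/duplex from the MII bus info apply. */
static const struct fs_mii_bus_info example_mii_info = {
	.method		= fsmii_fixed,
	.id		= 0,
	.i.fixed	= {
		.speed	= 100,
		.duplex	= 1,
	},
};

static struct fs_platform_info example_fec1_pdata = {
	.fs_no		= fsid_fec1,
	.phy_addr	= -1,		/* no PHY attached */
	.phy_irq	= -1,
	.bus_info	= &example_mii_info,
	.rx_ring	= 32,
	.tx_ring	= 16,
	.macaddr	= { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	.rx_copybreak	= 240,
	.use_napi	= 1,
	.napi_weight	= 17,
};
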
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 467a096c3b81..56192005fa4d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1788,11 +1788,13 @@
 #define PCI_DEVICE_ID_TIGON3_5721	0x1659
 #define PCI_DEVICE_ID_TIGON3_5705M	0x165d
 #define PCI_DEVICE_ID_TIGON3_5705M_2	0x165e
+#define PCI_DEVICE_ID_TIGON3_5714	0x1668
 #define PCI_DEVICE_ID_TIGON3_5780	0x166a
 #define PCI_DEVICE_ID_TIGON3_5780S	0x166b
 #define PCI_DEVICE_ID_TIGON3_5705F	0x166e
 #define PCI_DEVICE_ID_TIGON3_5750	0x1676
 #define PCI_DEVICE_ID_TIGON3_5751	0x1677
+#define PCI_DEVICE_ID_TIGON3_5715	0x1678
 #define PCI_DEVICE_ID_TIGON3_5750M	0x167c
 #define PCI_DEVICE_ID_TIGON3_5751M	0x167d
 #define PCI_DEVICE_ID_TIGON3_5751F	0x167e
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 30bb4a893237..2250a18b0cbb 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -237,8 +237,7 @@ typedef struct ax25_cb {
 static __inline__ void ax25_cb_put(ax25_cb *ax25)
 {
 	if (atomic_dec_and_test(&ax25->refcount)) {
-		if (ax25->digipeat)
-			kfree(ax25->digipeat);
+		kfree(ax25->digipeat);
 		kfree(ax25);
 	}
 }
diff --git a/include/net/netrom.h b/include/net/netrom.h
index a6bf6e0f606a..a5ee53bce62f 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -136,8 +136,7 @@ static __inline__ void nr_node_put(struct nr_node *nr_node)
 static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
 {
 	if (atomic_dec_and_test(&nr_neigh->refcount)) {
-		if (nr_neigh->digipeat != NULL)
-			kfree(nr_neigh->digipeat);
+		kfree(nr_neigh->digipeat);
 		kfree(nr_neigh);
 	}
 }
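
ax25_cb_put() and nr_neigh_put() are the same reference-drop idiom. Generically, with an invented struct standing in for both:

#include <linux/slab.h>
#include <asm/atomic.h>

struct example_obj {
	atomic_t refcount;
	void *digipeat;		/* optional buffer, may be NULL */
};

/* The last put frees the optional buffer (kfree tolerates NULL)
 * and then the object itself. */
static inline void example_obj_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount)) {
		kfree(obj->digipeat);
		kfree(obj);
	}
}
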