aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2005-10-30 21:37:12 -0500
committerPaul Mackerras <paulus@samba.org>2005-10-30 21:37:12 -0500
commit23fd07750a789a66fe88cf173d52a18f1a387da4 (patch)
tree06fdd6df35fdb835abdaa9b754d62f6b84b97250 /drivers/net
parentbd787d438a59266af3c9f6351644c85ef1dd21fe (diff)
parented28f96ac1960f30f818374d65be71d2fdf811b0 (diff)
Merge ../linux-2.6 by hand
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/8139cp.c5
-rw-r--r--drivers/net/8139too.c5
-rw-r--r--drivers/net/Kconfig102
-rw-r--r--drivers/net/Makefile7
-rw-r--r--drivers/net/acenic.c6
-rw-r--r--[-rwxr-xr-x]drivers/net/amd8111e.c0
-rw-r--r--[-rwxr-xr-x]drivers/net/amd8111e.h0
-rw-r--r--drivers/net/arm/am79c961a.c1
-rw-r--r--drivers/net/au1000_eth.c19
-rw-r--r--drivers/net/b44.c164
-rw-r--r--drivers/net/b44.h2
-rw-r--r--drivers/net/bmac.c6
-rw-r--r--drivers/net/bnx2.c12
-rw-r--r--drivers/net/bonding/bond_main.c57
-rw-r--r--drivers/net/cassini.c4
-rw-r--r--drivers/net/cs89x0.c14
-rw-r--r--drivers/net/cs89x0.h2
-rw-r--r--drivers/net/declance.c37
-rw-r--r--drivers/net/dm9000.c8
-rw-r--r--drivers/net/e100.c4
-rw-r--r--drivers/net/e1000/e1000.h74
-rw-r--r--drivers/net/e1000/e1000_ethtool.c102
-rw-r--r--drivers/net/e1000/e1000_hw.c220
-rw-r--r--drivers/net/e1000/e1000_hw.h96
-rw-r--r--drivers/net/e1000/e1000_main.c1087
-rw-r--r--drivers/net/e1000/e1000_param.c10
-rw-r--r--drivers/net/eepro.c57
-rw-r--r--drivers/net/epic100.c4
-rw-r--r--drivers/net/forcedeth.c310
-rw-r--r--drivers/net/fs_enet/Kconfig20
-rw-r--r--drivers/net/fs_enet/Makefile10
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c1226
-rw-r--r--drivers/net/fs_enet/fs_enet-mii.c507
-rw-r--r--drivers/net/fs_enet/fs_enet.h245
-rw-r--r--drivers/net/fs_enet/mac-fcc.c578
-rw-r--r--drivers/net/fs_enet/mac-fec.c653
-rw-r--r--drivers/net/fs_enet/mac-scc.c524
-rw-r--r--drivers/net/fs_enet/mii-bitbang.c405
-rw-r--r--drivers/net/fs_enet/mii-fixed.c92
-rw-r--r--drivers/net/gianfar.c412
-rw-r--r--drivers/net/gianfar.h30
-rw-r--r--drivers/net/gianfar_ethtool.c100
-rw-r--r--drivers/net/gianfar_mii.c219
-rw-r--r--drivers/net/gianfar_mii.h45
-rw-r--r--drivers/net/gianfar_phy.c661
-rw-r--r--drivers/net/gianfar_phy.h213
-rw-r--r--drivers/net/hamradio/Kconfig1
-rw-r--r--drivers/net/hamradio/bpqether.c9
-rw-r--r--drivers/net/hamradio/mkiss.c188
-rw-r--r--drivers/net/hamradio/mkiss.h62
-rw-r--r--drivers/net/hp100.c48
-rw-r--r--drivers/net/ibm_emac/Makefile13
-rw-r--r--drivers/net/ibm_emac/ibm_emac.h408
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.c3396
-rw-r--r--drivers/net/ibm_emac/ibm_emac_core.h313
-rw-r--r--drivers/net/ibm_emac/ibm_emac_debug.c363
-rw-r--r--drivers/net/ibm_emac/ibm_emac_debug.h63
-rw-r--r--drivers/net/ibm_emac/ibm_emac_mal.c674
-rw-r--r--drivers/net/ibm_emac/ibm_emac_mal.h332
-rw-r--r--drivers/net/ibm_emac/ibm_emac_phy.c335
-rw-r--r--drivers/net/ibm_emac/ibm_emac_phy.h105
-rw-r--r--drivers/net/ibm_emac/ibm_emac_rgmii.c201
-rw-r--r--drivers/net/ibm_emac/ibm_emac_rgmii.h60
-rw-r--r--drivers/net/ibm_emac/ibm_emac_tah.c111
-rw-r--r--drivers/net/ibm_emac/ibm_emac_tah.h96
-rw-r--r--drivers/net/ibm_emac/ibm_emac_zmii.c255
-rw-r--r--drivers/net/ibm_emac/ibm_emac_zmii.h104
-rw-r--r--drivers/net/ibmveth.c186
-rw-r--r--drivers/net/ibmveth.h23
-rw-r--r--drivers/net/irda/Kconfig10
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/donauboe.c6
-rw-r--r--drivers/net/irda/irda-usb.c6
-rw-r--r--drivers/net/irda/irport.c3
-rw-r--r--drivers/net/irda/pxaficp_ir.c865
-rw-r--r--drivers/net/irda/sa1100_ir.c8
-rw-r--r--drivers/net/irda/sir_dev.c3
-rw-r--r--drivers/net/irda/smsc-ircc2.c139
-rw-r--r--drivers/net/irda/stir4200.c7
-rw-r--r--drivers/net/irda/vlsi_ir.c3
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c8
-rw-r--r--drivers/net/ixgb/ixgb_main.c3
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/lasi_82596.c30
-rw-r--r--drivers/net/lne390.c2
-rw-r--r--drivers/net/mace.c6
-rw-r--r--drivers/net/mii.c15
-rw-r--r--drivers/net/mipsnet.c371
-rw-r--r--drivers/net/mipsnet.h127
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/myri_sbus.h2
-rw-r--r--drivers/net/ne.c15
-rw-r--r--drivers/net/ne2k-pci.c3
-rw-r--r--drivers/net/ni65.c9
-rw-r--r--drivers/net/ns83820.c3
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c6
-rw-r--r--drivers/net/pcnet32.c278
-rw-r--r--drivers/net/phy/Kconfig8
-rw-r--r--drivers/net/phy/mdio_bus.c20
-rw-r--r--drivers/net/phy/phy.c8
-rw-r--r--drivers/net/phy/phy_device.c3
-rw-r--r--drivers/net/ppp_generic.c2
-rw-r--r--drivers/net/r8169.c2
-rw-r--r--drivers/net/rionet.c574
-rw-r--r--drivers/net/rrunner.c6
-rw-r--r--drivers/net/s2io-regs.h11
-rw-r--r--drivers/net/s2io.c794
-rw-r--r--drivers/net/s2io.h50
-rw-r--r--drivers/net/saa9730.c8
-rw-r--r--drivers/net/sb1250-mac.c1384
-rw-r--r--drivers/net/sgiseeq.c37
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sis900.c16
-rw-r--r--drivers/net/skfp/smt.c2
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/smc91x.c12
-rw-r--r--drivers/net/smc91x.h12
-rw-r--r--drivers/net/starfire.c4
-rw-r--r--drivers/net/sunbmac.c3
-rw-r--r--drivers/net/sunbmac.h2
-rw-r--r--drivers/net/sundance.c111
-rw-r--r--drivers/net/tg3.c91
-rw-r--r--drivers/net/tg3.h12
-rw-r--r--drivers/net/tokenring/ibmtr.c9
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/tokenring/tms380tr.c3
-rw-r--r--drivers/net/tulip/de2104x.c11
-rw-r--r--drivers/net/tulip/tulip_core.c6
-rw-r--r--drivers/net/typhoon.c7
-rw-r--r--drivers/net/via-rhine.c38
-rw-r--r--drivers/net/via-velocity.c6
-rw-r--r--drivers/net/wan/cosa.c8
-rw-r--r--drivers/net/wan/cycx_drv.c7
-rw-r--r--drivers/net/wan/cycx_main.c2
-rw-r--r--drivers/net/wan/cycx_x25.c5
-rw-r--r--drivers/net/wan/dscc4.c23
-rw-r--r--drivers/net/wan/farsync.c27
-rw-r--r--drivers/net/wan/hdlc_fr.c2
-rw-r--r--drivers/net/wan/lmc/lmc_debug.c10
-rw-r--r--drivers/net/wan/lmc/lmc_media.c8
-rw-r--r--drivers/net/wan/pc300.h16
-rw-r--r--drivers/net/wan/pc300_drv.c87
-rw-r--r--drivers/net/wan/pc300_tty.c18
-rw-r--r--drivers/net/wan/sdla.c20
-rw-r--r--drivers/net/wan/sdla_fr.c4
-rw-r--r--drivers/net/wan/sdla_x25.c8
-rw-r--r--drivers/net/wan/sdladrv.c16
-rw-r--r--drivers/net/wan/syncppp.c10
-rw-r--r--drivers/net/wireless/airo.c92
-rw-r--r--drivers/net/wireless/airo_cs.c4
-rw-r--r--drivers/net/wireless/airport.c19
-rw-r--r--drivers/net/wireless/atmel.c30
-rw-r--r--drivers/net/wireless/atmel_cs.c3
-rw-r--r--drivers/net/wireless/hermes.c49
-rw-r--r--drivers/net/wireless/hermes.h113
-rw-r--r--drivers/net/wireless/hostap/hostap.c6
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c43
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_tx.c28
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c80
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.h6
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c50
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c22
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c32
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c21
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c11
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h2
-rw-r--r--drivers/net/wireless/ipw2100.c24
-rw-r--r--drivers/net/wireless/ipw2100.h2
-rw-r--r--drivers/net/wireless/ipw2200.c31
-rw-r--r--drivers/net/wireless/ipw2200.h4
-rw-r--r--drivers/net/wireless/netwave_cs.c185
-rw-r--r--drivers/net/wireless/orinoco.c248
-rw-r--r--drivers/net/wireless/orinoco.h16
-rw-r--r--drivers/net/wireless/orinoco_cs.c110
-rw-r--r--drivers/net/wireless/orinoco_nortel.c20
-rw-r--r--drivers/net/wireless/orinoco_pci.c18
-rw-r--r--drivers/net/wireless/orinoco_plx.c18
-rw-r--r--drivers/net/wireless/orinoco_tmd.c18
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c13
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c13
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c7
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c9
-rw-r--r--drivers/net/wireless/ray_cs.c46
-rw-r--r--drivers/net/wireless/spectrum_cs.c79
-rw-r--r--drivers/net/wireless/strip.c38
-rw-r--r--drivers/net/wireless/wavelan.c8
-rw-r--r--drivers/net/wireless/wavelan.p.h4
-rw-r--r--drivers/net/wireless/wavelan_cs.c8
-rw-r--r--drivers/net/wireless/wavelan_cs.p.h4
-rw-r--r--drivers/net/wireless/wl3501.h2
192 files changed, 15509 insertions, 7204 deletions
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index bc537440ca02..f822cd3025ff 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1027,8 +1027,7 @@ static void cp_reset_hw (struct cp_private *cp)
1027 if (!(cpr8(Cmd) & CmdReset)) 1027 if (!(cpr8(Cmd) & CmdReset))
1028 return; 1028 return;
1029 1029
1030 set_current_state(TASK_UNINTERRUPTIBLE); 1030 schedule_timeout_uninterruptible(10);
1031 schedule_timeout(10);
1032 } 1031 }
1033 1032
1034 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name); 1033 printk(KERN_ERR "%s: hardware reset timeout\n", cp->dev->name);
@@ -1575,6 +1574,7 @@ static struct ethtool_ops cp_ethtool_ops = {
1575 .set_wol = cp_set_wol, 1574 .set_wol = cp_set_wol,
1576 .get_strings = cp_get_strings, 1575 .get_strings = cp_get_strings,
1577 .get_ethtool_stats = cp_get_ethtool_stats, 1576 .get_ethtool_stats = cp_get_ethtool_stats,
1577 .get_perm_addr = ethtool_op_get_perm_addr,
1578}; 1578};
1579 1579
1580static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) 1580static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1773,6 +1773,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1773 for (i = 0; i < 3; i++) 1773 for (i = 0; i < 3; i++)
1774 ((u16 *) (dev->dev_addr))[i] = 1774 ((u16 *) (dev->dev_addr))[i] =
1775 le16_to_cpu (read_eeprom (regs, i + 7, addr_len)); 1775 le16_to_cpu (read_eeprom (regs, i + 7, addr_len));
1776 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1776 1777
1777 dev->open = cp_open; 1778 dev->open = cp_open;
1778 dev->stop = cp_close; 1779 dev->stop = cp_close;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 4c2cf7bbd252..30bee11c48bd 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -552,7 +552,8 @@ const static struct {
552 552
553 { "RTL-8100B/8139D", 553 { "RTL-8100B/8139D",
554 HW_REVID(1, 1, 1, 0, 1, 0, 1), 554 HW_REVID(1, 1, 1, 0, 1, 0, 1),
555 HasLWake, 555 HasHltClk /* XXX undocumented? */
556 | HasLWake,
556 }, 557 },
557 558
558 { "RTL-8101", 559 { "RTL-8101",
@@ -970,6 +971,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
970 for (i = 0; i < 3; i++) 971 for (i = 0; i < 3; i++)
971 ((u16 *) (dev->dev_addr))[i] = 972 ((u16 *) (dev->dev_addr))[i] =
972 le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len)); 973 le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
974 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
973 975
974 /* The Rtl8139-specific entries in the device structure. */ 976 /* The Rtl8139-specific entries in the device structure. */
975 dev->open = rtl8139_open; 977 dev->open = rtl8139_open;
@@ -2465,6 +2467,7 @@ static struct ethtool_ops rtl8139_ethtool_ops = {
2465 .get_strings = rtl8139_get_strings, 2467 .get_strings = rtl8139_get_strings,
2466 .get_stats_count = rtl8139_get_stats_count, 2468 .get_stats_count = rtl8139_get_stats_count,
2467 .get_ethtool_stats = rtl8139_get_ethtool_stats, 2469 .get_ethtool_stats = rtl8139_get_ethtool_stats,
2470 .get_perm_addr = ethtool_op_get_perm_addr,
2468}; 2471};
2469 2472
2470static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2473static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c748b0e16419..6d4f9ceb0a32 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -475,6 +475,14 @@ config SGI_IOC3_ETH_HW_TX_CSUM
475 the moment only acceleration of IPv4 is supported. This option 475 the moment only acceleration of IPv4 is supported. This option
476 enables offloading for checksums on transmit. If unsure, say Y. 476 enables offloading for checksums on transmit. If unsure, say Y.
477 477
478config MIPS_SIM_NET
479 tristate "MIPS simulator Network device (EXPERIMENTAL)"
480 depends on NETDEVICES && MIPS_SIM && EXPERIMENTAL
481 help
482 The MIPSNET device is a simple Ethernet network device which is
483 emulated by the MIPS Simulator.
484 If you are not using a MIPSsim or are unsure, say N.
485
478config SGI_O2MACE_ETH 486config SGI_O2MACE_ETH
479 tristate "SGI O2 MACE Fast Ethernet support" 487 tristate "SGI O2 MACE Fast Ethernet support"
480 depends on NET_ETHERNET && SGI_IP32=y 488 depends on NET_ETHERNET && SGI_IP32=y
@@ -1155,38 +1163,74 @@ config IBMVETH
1155 be called ibmveth. 1163 be called ibmveth.
1156 1164
1157config IBM_EMAC 1165config IBM_EMAC
1158 bool "IBM PPC4xx EMAC driver support" 1166 tristate "PowerPC 4xx on-chip Ethernet support"
1159 depends on 4xx 1167 depends on 4xx
1160 select CRC32 1168 help
1161 ---help--- 1169 This driver supports the PowerPC 4xx EMAC family of on-chip
1162 This driver supports the IBM PPC4xx EMAC family of on-chip 1170 Ethernet controllers.
1163 Ethernet controllers.
1164
1165config IBM_EMAC_ERRMSG
1166 bool "Verbose error messages"
1167 depends on IBM_EMAC && BROKEN
1168 1171
1169config IBM_EMAC_RXB 1172config IBM_EMAC_RXB
1170 int "Number of receive buffers" 1173 int "Number of receive buffers"
1171 depends on IBM_EMAC 1174 depends on IBM_EMAC
1172 default "128" if IBM_EMAC4 1175 default "128"
1173 default "64"
1174 1176
1175config IBM_EMAC_TXB 1177config IBM_EMAC_TXB
1176 int "Number of transmit buffers" 1178 int "Number of transmit buffers"
1177 depends on IBM_EMAC 1179 depends on IBM_EMAC
1178 default "128" if IBM_EMAC4 1180 default "64"
1179 default "8" 1181
1182config IBM_EMAC_POLL_WEIGHT
1183 int "MAL NAPI polling weight"
1184 depends on IBM_EMAC
1185 default "32"
1180 1186
1181config IBM_EMAC_FGAP 1187config IBM_EMAC_RX_COPY_THRESHOLD
1182 int "Frame gap" 1188 int "RX skb copy threshold (bytes)"
1183 depends on IBM_EMAC 1189 depends on IBM_EMAC
1184 default "8" 1190 default "256"
1185 1191
1186config IBM_EMAC_SKBRES 1192config IBM_EMAC_RX_SKB_HEADROOM
1187 int "Skb reserve amount" 1193 int "Additional RX skb headroom (bytes)"
1188 depends on IBM_EMAC 1194 depends on IBM_EMAC
1189 default "0" 1195 default "0"
1196 help
1197 Additional receive skb headroom. Note, that driver
1198 will always reserve at least 2 bytes to make IP header
1199 aligned, so usualy there is no need to add any additional
1200 headroom.
1201
1202 If unsure, set to 0.
1203
1204config IBM_EMAC_PHY_RX_CLK_FIX
1205 bool "PHY Rx clock workaround"
1206 depends on IBM_EMAC && (405EP || 440GX || 440EP)
1207 help
1208 Enable this if EMAC attached to a PHY which doesn't generate
1209 RX clock if there is no link, if this is the case, you will
1210 see "TX disable timeout" or "RX disable timeout" in the system
1211 log.
1212
1213 If unsure, say N.
1214
1215config IBM_EMAC_DEBUG
1216 bool "Debugging"
1217 depends on IBM_EMAC
1218 default n
1219
1220config IBM_EMAC_ZMII
1221 bool
1222 depends on IBM_EMAC && (NP405H || NP405L || 44x)
1223 default y
1224
1225config IBM_EMAC_RGMII
1226 bool
1227 depends on IBM_EMAC && 440GX
1228 default y
1229
1230config IBM_EMAC_TAH
1231 bool
1232 depends on IBM_EMAC && 440GX
1233 default y
1190 1234
1191config NET_PCI 1235config NET_PCI
1192 bool "EISA, VLB, PCI and on board controllers" 1236 bool "EISA, VLB, PCI and on board controllers"
@@ -1330,7 +1374,7 @@ config FORCEDETH
1330 1374
1331config CS89x0 1375config CS89x0
1332 tristate "CS89x0 support" 1376 tristate "CS89x0 support"
1333 depends on (NET_PCI && (ISA || ARCH_IXDP2X01)) || ARCH_PNX0105 1377 depends on (NET_PCI && (ISA || ARCH_IXDP2X01)) || ARCH_PNX0105 || MACH_MP1000
1334 ---help--- 1378 ---help---
1335 Support for CS89x0 chipset based Ethernet cards. If you have a 1379 Support for CS89x0 chipset based Ethernet cards. If you have a
1336 network (Ethernet) card of this type, say Y and read the 1380 network (Ethernet) card of this type, say Y and read the
@@ -1767,6 +1811,7 @@ config NE_H8300
1767 controller on the Renesas H8/300 processor. 1811 controller on the Renesas H8/300 processor.
1768 1812
1769source "drivers/net/fec_8xx/Kconfig" 1813source "drivers/net/fec_8xx/Kconfig"
1814source "drivers/net/fs_enet/Kconfig"
1770 1815
1771endmenu 1816endmenu
1772 1817
@@ -2083,6 +2128,7 @@ config SPIDER_NET
2083config GIANFAR 2128config GIANFAR
2084 tristate "Gianfar Ethernet" 2129 tristate "Gianfar Ethernet"
2085 depends on 85xx || 83xx 2130 depends on 85xx || 83xx
2131 select PHYLIB
2086 help 2132 help
2087 This driver supports the Gigabit TSEC on the MPC85xx 2133 This driver supports the Gigabit TSEC on the MPC85xx
2088 family of chips, and the FEC on the 8540 2134 family of chips, and the FEC on the 8540
@@ -2192,8 +2238,8 @@ config S2IO
2192 depends on PCI 2238 depends on PCI
2193 ---help--- 2239 ---help---
2194 This driver supports the 10Gbe XFrame NIC of S2IO. 2240 This driver supports the 10Gbe XFrame NIC of S2IO.
2195 For help regarding driver compilation, installation and 2241 More specific information on configuring the driver is in
2196 tuning please look into ~/drivers/net/s2io/README.txt. 2242 <file:Documentation/networking/s2io.txt>.
2197 2243
2198config S2IO_NAPI 2244config S2IO_NAPI
2199 bool "Use Rx Polling (NAPI) (EXPERIMENTAL)" 2245 bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
@@ -2243,6 +2289,20 @@ config ISERIES_VETH
2243 tristate "iSeries Virtual Ethernet driver support" 2289 tristate "iSeries Virtual Ethernet driver support"
2244 depends on PPC_ISERIES 2290 depends on PPC_ISERIES
2245 2291
2292config RIONET
2293 tristate "RapidIO Ethernet over messaging driver support"
2294 depends on NETDEVICES && RAPIDIO
2295
2296config RIONET_TX_SIZE
2297 int "Number of outbound queue entries"
2298 depends on RIONET
2299 default "128"
2300
2301config RIONET_RX_SIZE
2302 int "Number of inbound queue entries"
2303 depends on RIONET
2304 default "128"
2305
2246config FDDI 2306config FDDI
2247 bool "FDDI driver support" 2307 bool "FDDI driver support"
2248 depends on (PCI || EISA) 2308 depends on (PCI || EISA)
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 8aeec9f2495b..7c313cb341b8 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
13obj-$(CONFIG_BONDING) += bonding/ 13obj-$(CONFIG_BONDING) += bonding/
14obj-$(CONFIG_GIANFAR) += gianfar_driver.o 14obj-$(CONFIG_GIANFAR) += gianfar_driver.o
15 15
16gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_phy.o 16gianfar_driver-objs := gianfar.o gianfar_ethtool.o gianfar_mii.o
17 17
18# 18#
19# link order important here 19# link order important here
@@ -64,6 +64,7 @@ obj-$(CONFIG_SKFP) += skfp/
64obj-$(CONFIG_VIA_RHINE) += via-rhine.o 64obj-$(CONFIG_VIA_RHINE) += via-rhine.o
65obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o 65obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
66obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o 66obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
67obj-$(CONFIG_RIONET) += rionet.o
67 68
68# 69#
69# end link order section 70# end link order section
@@ -166,6 +167,7 @@ obj-$(CONFIG_EQUALIZER) += eql.o
166obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o 167obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
167obj-$(CONFIG_MIPS_GT96100ETH) += gt96100eth.o 168obj-$(CONFIG_MIPS_GT96100ETH) += gt96100eth.o
168obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o 169obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
170obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
169obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o 171obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
170obj-$(CONFIG_DECLANCE) += declance.o 172obj-$(CONFIG_DECLANCE) += declance.o
171obj-$(CONFIG_ATARILANCE) += atarilance.o 173obj-$(CONFIG_ATARILANCE) += atarilance.o
@@ -201,3 +203,6 @@ obj-$(CONFIG_IRDA) += irda/
201obj-$(CONFIG_ETRAX_ETHERNET) += cris/ 203obj-$(CONFIG_ETRAX_ETHERNET) += cris/
202 204
203obj-$(CONFIG_NETCONSOLE) += netconsole.o 205obj-$(CONFIG_NETCONSOLE) += netconsole.o
206
207obj-$(CONFIG_FS_ENET) += fs_enet/
208
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index dbecc6bf7851..b8953de5664a 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -871,10 +871,8 @@ static void ace_init_cleanup(struct net_device *dev)
871 if (ap->info) 871 if (ap->info)
872 pci_free_consistent(ap->pdev, sizeof(struct ace_info), 872 pci_free_consistent(ap->pdev, sizeof(struct ace_info),
873 ap->info, ap->info_dma); 873 ap->info, ap->info_dma);
874 if (ap->skb) 874 kfree(ap->skb);
875 kfree(ap->skb); 875 kfree(ap->trace_buf);
876 if (ap->trace_buf)
877 kfree(ap->trace_buf);
878 876
879 if (dev->irq) 877 if (dev->irq)
880 free_irq(dev->irq, dev); 878 free_irq(dev->irq, dev);
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index d9ba8be72af8..d9ba8be72af8 100755..100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index cfe3a4298822..cfe3a4298822 100755..100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index c56d86d371a9..3d50e953faaa 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -29,6 +29,7 @@
29 29
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/hardware.h>
32#include <asm/io.h> 33#include <asm/io.h>
33 34
34#define TX_BUFFERS 15 35#define TX_BUFFERS 15
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index c82b9cd1c924..332e9953c55c 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -151,13 +151,6 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
151 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ 151 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
152 SUPPORTED_Autoneg 152 SUPPORTED_Autoneg
153 153
154static char *phy_link[] =
155{ "unknown",
156 "10Base2", "10BaseT",
157 "AUI",
158 "100BaseT", "100BaseTX", "100BaseFX"
159};
160
161int bcm_5201_init(struct net_device *dev, int phy_addr) 154int bcm_5201_init(struct net_device *dev, int phy_addr)
162{ 155{
163 s16 data; 156 s16 data;
@@ -785,6 +778,7 @@ static struct mii_chip_info {
785 {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0}, 778 {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
786 {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0}, 779 {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
787 {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1}, 780 {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
781 {"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops ,0},
788 {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0}, 782 {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
789 {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0}, 783 {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
790 {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0}, 784 {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
@@ -1045,7 +1039,7 @@ found:
1045#endif 1039#endif
1046 1040
1047 if (aup->mii->chip_info == NULL) { 1041 if (aup->mii->chip_info == NULL) {
1048 printk(KERN_ERR "%s: Au1x No MII transceivers found!\n", 1042 printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
1049 dev->name); 1043 dev->name);
1050 return -1; 1044 return -1;
1051 } 1045 }
@@ -1546,6 +1540,9 @@ au1000_probe(u32 ioaddr, int irq, int port_num)
1546 printk(KERN_ERR "%s: out of memory\n", dev->name); 1540 printk(KERN_ERR "%s: out of memory\n", dev->name);
1547 goto err_out; 1541 goto err_out;
1548 } 1542 }
1543 aup->mii->next = NULL;
1544 aup->mii->chip_info = NULL;
1545 aup->mii->status = 0;
1549 aup->mii->mii_control_reg = 0; 1546 aup->mii->mii_control_reg = 0;
1550 aup->mii->mii_data_reg = 0; 1547 aup->mii->mii_data_reg = 0;
1551 1548
@@ -1609,8 +1606,7 @@ err_out:
1609 /* here we should have a valid dev plus aup-> register addresses 1606 /* here we should have a valid dev plus aup-> register addresses
1610 * so we can reset the mac properly.*/ 1607 * so we can reset the mac properly.*/
1611 reset_mac(dev); 1608 reset_mac(dev);
1612 if (aup->mii) 1609 kfree(aup->mii);
1613 kfree(aup->mii);
1614 for (i = 0; i < NUM_RX_DMA; i++) { 1610 for (i = 0; i < NUM_RX_DMA; i++) {
1615 if (aup->rx_db_inuse[i]) 1611 if (aup->rx_db_inuse[i])
1616 ReleaseDB(aup, aup->rx_db_inuse[i]); 1612 ReleaseDB(aup, aup->rx_db_inuse[i]);
@@ -1809,8 +1805,7 @@ static void __exit au1000_cleanup_module(void)
1809 if (dev) { 1805 if (dev) {
1810 aup = (struct au1000_private *) dev->priv; 1806 aup = (struct au1000_private *) dev->priv;
1811 unregister_netdev(dev); 1807 unregister_netdev(dev);
1812 if (aup->mii) 1808 kfree(aup->mii);
1813 kfree(aup->mii);
1814 for (j = 0; j < NUM_RX_DMA; j++) { 1809 for (j = 0; j < NUM_RX_DMA; j++) {
1815 if (aup->rx_db_inuse[j]) 1810 if (aup->rx_db_inuse[j])
1816 ReleaseDB(aup, aup->rx_db_inuse[j]); 1811 ReleaseDB(aup, aup->rx_db_inuse[j]);
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 94939f570f78..0ee3e27969c6 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -19,6 +19,7 @@
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/version.h> 21#include <linux/version.h>
22#include <linux/dma-mapping.h>
22 23
23#include <asm/uaccess.h> 24#include <asm/uaccess.h>
24#include <asm/io.h> 25#include <asm/io.h>
@@ -106,6 +107,29 @@ static int b44_poll(struct net_device *dev, int *budget);
106static void b44_poll_controller(struct net_device *dev); 107static void b44_poll_controller(struct net_device *dev);
107#endif 108#endif
108 109
110static int dma_desc_align_mask;
111static int dma_desc_sync_size;
112
113static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
114 dma_addr_t dma_base,
115 unsigned long offset,
116 enum dma_data_direction dir)
117{
118 dma_sync_single_range_for_device(&pdev->dev, dma_base,
119 offset & dma_desc_align_mask,
120 dma_desc_sync_size, dir);
121}
122
123static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
124 dma_addr_t dma_base,
125 unsigned long offset,
126 enum dma_data_direction dir)
127{
128 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
129 offset & dma_desc_align_mask,
130 dma_desc_sync_size, dir);
131}
132
109static inline unsigned long br32(const struct b44 *bp, unsigned long reg) 133static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
110{ 134{
111 return readl(bp->regs + reg); 135 return readl(bp->regs + reg);
@@ -668,6 +692,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
668 dp->ctrl = cpu_to_le32(ctrl); 692 dp->ctrl = cpu_to_le32(ctrl);
669 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset); 693 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
670 694
695 if (bp->flags & B44_FLAG_RX_RING_HACK)
696 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
697 dest_idx * sizeof(dp),
698 DMA_BIDIRECTIONAL);
699
671 return RX_PKT_BUF_SZ; 700 return RX_PKT_BUF_SZ;
672} 701}
673 702
@@ -692,6 +721,11 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
692 pci_unmap_addr_set(dest_map, mapping, 721 pci_unmap_addr_set(dest_map, mapping,
693 pci_unmap_addr(src_map, mapping)); 722 pci_unmap_addr(src_map, mapping));
694 723
724 if (bp->flags & B44_FLAG_RX_RING_HACK)
725 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
726 src_idx * sizeof(src_desc),
727 DMA_BIDIRECTIONAL);
728
695 ctrl = src_desc->ctrl; 729 ctrl = src_desc->ctrl;
696 if (dest_idx == (B44_RX_RING_SIZE - 1)) 730 if (dest_idx == (B44_RX_RING_SIZE - 1))
697 ctrl |= cpu_to_le32(DESC_CTRL_EOT); 731 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
@@ -700,8 +734,14 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
700 734
701 dest_desc->ctrl = ctrl; 735 dest_desc->ctrl = ctrl;
702 dest_desc->addr = src_desc->addr; 736 dest_desc->addr = src_desc->addr;
737
703 src_map->skb = NULL; 738 src_map->skb = NULL;
704 739
740 if (bp->flags & B44_FLAG_RX_RING_HACK)
741 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
742 dest_idx * sizeof(dest_desc),
743 DMA_BIDIRECTIONAL);
744
705 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr, 745 pci_dma_sync_single_for_device(bp->pdev, src_desc->addr,
706 RX_PKT_BUF_SZ, 746 RX_PKT_BUF_SZ,
707 PCI_DMA_FROMDEVICE); 747 PCI_DMA_FROMDEVICE);
@@ -959,6 +999,11 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
959 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl); 999 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
960 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); 1000 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
961 1001
1002 if (bp->flags & B44_FLAG_TX_RING_HACK)
1003 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1004 entry * sizeof(bp->tx_ring[0]),
1005 DMA_TO_DEVICE);
1006
962 entry = NEXT_TX(entry); 1007 entry = NEXT_TX(entry);
963 1008
964 bp->tx_prod = entry; 1009 bp->tx_prod = entry;
@@ -1064,6 +1109,16 @@ static void b44_init_rings(struct b44 *bp)
1064 memset(bp->rx_ring, 0, B44_RX_RING_BYTES); 1109 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1065 memset(bp->tx_ring, 0, B44_TX_RING_BYTES); 1110 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1066 1111
1112 if (bp->flags & B44_FLAG_RX_RING_HACK)
1113 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1114 DMA_TABLE_BYTES,
1115 PCI_DMA_BIDIRECTIONAL);
1116
1117 if (bp->flags & B44_FLAG_TX_RING_HACK)
1118 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1119 DMA_TABLE_BYTES,
1120 PCI_DMA_TODEVICE);
1121
1067 for (i = 0; i < bp->rx_pending; i++) { 1122 for (i = 0; i < bp->rx_pending; i++) {
1068 if (b44_alloc_rx_skb(bp, -1, i) < 0) 1123 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1069 break; 1124 break;
@@ -1076,23 +1131,33 @@ static void b44_init_rings(struct b44 *bp)
1076 */ 1131 */
1077static void b44_free_consistent(struct b44 *bp) 1132static void b44_free_consistent(struct b44 *bp)
1078{ 1133{
1079 if (bp->rx_buffers) { 1134 kfree(bp->rx_buffers);
1080 kfree(bp->rx_buffers); 1135 bp->rx_buffers = NULL;
1081 bp->rx_buffers = NULL; 1136 kfree(bp->tx_buffers);
1082 } 1137 bp->tx_buffers = NULL;
1083 if (bp->tx_buffers) {
1084 kfree(bp->tx_buffers);
1085 bp->tx_buffers = NULL;
1086 }
1087 if (bp->rx_ring) { 1138 if (bp->rx_ring) {
1088 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, 1139 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1089 bp->rx_ring, bp->rx_ring_dma); 1140 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1141 DMA_TABLE_BYTES,
1142 DMA_BIDIRECTIONAL);
1143 kfree(bp->rx_ring);
1144 } else
1145 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1146 bp->rx_ring, bp->rx_ring_dma);
1090 bp->rx_ring = NULL; 1147 bp->rx_ring = NULL;
1148 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1091 } 1149 }
1092 if (bp->tx_ring) { 1150 if (bp->tx_ring) {
1093 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES, 1151 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1094 bp->tx_ring, bp->tx_ring_dma); 1152 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1153 DMA_TABLE_BYTES,
1154 DMA_TO_DEVICE);
1155 kfree(bp->tx_ring);
1156 } else
1157 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1158 bp->tx_ring, bp->tx_ring_dma);
1095 bp->tx_ring = NULL; 1159 bp->tx_ring = NULL;
1160 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1096 } 1161 }
1097} 1162}
1098 1163
@@ -1118,12 +1183,56 @@ static int b44_alloc_consistent(struct b44 *bp)
1118 1183
1119 size = DMA_TABLE_BYTES; 1184 size = DMA_TABLE_BYTES;
1120 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); 1185 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1121 if (!bp->rx_ring) 1186 if (!bp->rx_ring) {
1122 goto out_err; 1187 /* Allocation may have failed due to pci_alloc_consistent
1188 insisting on use of GFP_DMA, which is more restrictive
1189 than necessary... */
1190 struct dma_desc *rx_ring;
1191 dma_addr_t rx_ring_dma;
1192
1193 if (!(rx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
1194 goto out_err;
1195
1196 memset(rx_ring, 0, size);
1197 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1198 DMA_TABLE_BYTES,
1199 DMA_BIDIRECTIONAL);
1200
1201 if (rx_ring_dma + size > B44_DMA_MASK) {
1202 kfree(rx_ring);
1203 goto out_err;
1204 }
1205
1206 bp->rx_ring = rx_ring;
1207 bp->rx_ring_dma = rx_ring_dma;
1208 bp->flags |= B44_FLAG_RX_RING_HACK;
1209 }
1123 1210
1124 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma); 1211 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1125 if (!bp->tx_ring) 1212 if (!bp->tx_ring) {
1126 goto out_err; 1213 /* Allocation may have failed due to pci_alloc_consistent
1214 insisting on use of GFP_DMA, which is more restrictive
1215 than necessary... */
1216 struct dma_desc *tx_ring;
1217 dma_addr_t tx_ring_dma;
1218
1219 if (!(tx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL)))
1220 goto out_err;
1221
1222 memset(tx_ring, 0, size);
1223 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1224 DMA_TABLE_BYTES,
1225 DMA_TO_DEVICE);
1226
1227 if (tx_ring_dma + size > B44_DMA_MASK) {
1228 kfree(tx_ring);
1229 goto out_err;
1230 }
1231
1232 bp->tx_ring = tx_ring;
1233 bp->tx_ring_dma = tx_ring_dma;
1234 bp->flags |= B44_FLAG_TX_RING_HACK;
1235 }
1127 1236
1128 return 0; 1237 return 0;
1129 1238
@@ -1507,14 +1616,14 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1507 1616
1508 cmd->advertising = 0; 1617 cmd->advertising = 0;
1509 if (bp->flags & B44_FLAG_ADV_10HALF) 1618 if (bp->flags & B44_FLAG_ADV_10HALF)
1510 cmd->advertising |= ADVERTISE_10HALF; 1619 cmd->advertising |= ADVERTISED_10baseT_Half;
1511 if (bp->flags & B44_FLAG_ADV_10FULL) 1620 if (bp->flags & B44_FLAG_ADV_10FULL)
1512 cmd->advertising |= ADVERTISE_10FULL; 1621 cmd->advertising |= ADVERTISED_10baseT_Full;
1513 if (bp->flags & B44_FLAG_ADV_100HALF) 1622 if (bp->flags & B44_FLAG_ADV_100HALF)
1514 cmd->advertising |= ADVERTISE_100HALF; 1623 cmd->advertising |= ADVERTISED_100baseT_Half;
1515 if (bp->flags & B44_FLAG_ADV_100FULL) 1624 if (bp->flags & B44_FLAG_ADV_100FULL)
1516 cmd->advertising |= ADVERTISE_100FULL; 1625 cmd->advertising |= ADVERTISED_100baseT_Full;
1517 cmd->advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1626 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1518 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ? 1627 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1519 SPEED_100 : SPEED_10; 1628 SPEED_100 : SPEED_10;
1520 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ? 1629 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
@@ -1676,6 +1785,7 @@ static struct ethtool_ops b44_ethtool_ops = {
1676 .set_pauseparam = b44_set_pauseparam, 1785 .set_pauseparam = b44_set_pauseparam,
1677 .get_msglevel = b44_get_msglevel, 1786 .get_msglevel = b44_get_msglevel,
1678 .set_msglevel = b44_set_msglevel, 1787 .set_msglevel = b44_set_msglevel,
1788 .get_perm_addr = ethtool_op_get_perm_addr,
1679}; 1789};
1680 1790
1681static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1791static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1718,6 +1828,7 @@ static int __devinit b44_get_invariants(struct b44 *bp)
1718 bp->dev->dev_addr[3] = eeprom[80]; 1828 bp->dev->dev_addr[3] = eeprom[80];
1719 bp->dev->dev_addr[4] = eeprom[83]; 1829 bp->dev->dev_addr[4] = eeprom[83];
1720 bp->dev->dev_addr[5] = eeprom[82]; 1830 bp->dev->dev_addr[5] = eeprom[82];
1831 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
1721 1832
1722 bp->phy_addr = eeprom[90] & 0x1f; 1833 bp->phy_addr = eeprom[90] & 0x1f;
1723 1834
@@ -1930,6 +2041,8 @@ static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
1930 b44_free_rings(bp); 2041 b44_free_rings(bp);
1931 2042
1932 spin_unlock_irq(&bp->lock); 2043 spin_unlock_irq(&bp->lock);
2044
2045 free_irq(dev->irq, dev);
1933 pci_disable_device(pdev); 2046 pci_disable_device(pdev);
1934 return 0; 2047 return 0;
1935} 2048}
@@ -1946,6 +2059,9 @@ static int b44_resume(struct pci_dev *pdev)
1946 if (!netif_running(dev)) 2059 if (!netif_running(dev))
1947 return 0; 2060 return 0;
1948 2061
2062 if (request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev))
2063 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2064
1949 spin_lock_irq(&bp->lock); 2065 spin_lock_irq(&bp->lock);
1950 2066
1951 b44_init_rings(bp); 2067 b44_init_rings(bp);
@@ -1971,6 +2087,12 @@ static struct pci_driver b44_driver = {
1971 2087
1972static int __init b44_init(void) 2088static int __init b44_init(void)
1973{ 2089{
2090 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2091
2092 /* Setup paramaters for syncing RX/TX DMA descriptors */
2093 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2094 dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
2095
1974 return pci_module_init(&b44_driver); 2096 return pci_module_init(&b44_driver);
1975} 2097}
1976 2098
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 11c40a2e71c7..593cb0ad4100 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -400,6 +400,8 @@ struct b44 {
400#define B44_FLAG_ADV_100HALF 0x04000000 400#define B44_FLAG_ADV_100HALF 0x04000000
401#define B44_FLAG_ADV_100FULL 0x08000000 401#define B44_FLAG_ADV_100FULL 0x08000000
402#define B44_FLAG_INTERNAL_PHY 0x10000000 402#define B44_FLAG_INTERNAL_PHY 0x10000000
403#define B44_FLAG_RX_RING_HACK 0x20000000
404#define B44_FLAG_TX_RING_HACK 0x40000000
403 405
404 u32 rx_offset; 406 u32 rx_offset;
405 407
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 0ee28899fb8d..bbca8ae8018c 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -1690,10 +1690,8 @@ static void __exit bmac_exit(void)
1690{ 1690{
1691 macio_unregister_driver(&bmac_driver); 1691 macio_unregister_driver(&bmac_driver);
1692 1692
1693 if (bmac_emergency_rxbuf != NULL) { 1693 kfree(bmac_emergency_rxbuf);
1694 kfree(bmac_emergency_rxbuf); 1694 bmac_emergency_rxbuf = NULL;
1695 bmac_emergency_rxbuf = NULL;
1696 }
1697} 1695}
1698 1696
1699MODULE_AUTHOR("Randy Gobbel/Paul Mackerras"); 1697MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 3a2ace01e444..11d252318221 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -314,20 +314,16 @@ bnx2_free_mem(struct bnx2 *bp)
314 bp->tx_desc_ring, bp->tx_desc_mapping); 314 bp->tx_desc_ring, bp->tx_desc_mapping);
315 bp->tx_desc_ring = NULL; 315 bp->tx_desc_ring = NULL;
316 } 316 }
317 if (bp->tx_buf_ring) { 317 kfree(bp->tx_buf_ring);
318 kfree(bp->tx_buf_ring); 318 bp->tx_buf_ring = NULL;
319 bp->tx_buf_ring = NULL;
320 }
321 if (bp->rx_desc_ring) { 319 if (bp->rx_desc_ring) {
322 pci_free_consistent(bp->pdev, 320 pci_free_consistent(bp->pdev,
323 sizeof(struct rx_bd) * RX_DESC_CNT, 321 sizeof(struct rx_bd) * RX_DESC_CNT,
324 bp->rx_desc_ring, bp->rx_desc_mapping); 322 bp->rx_desc_ring, bp->rx_desc_mapping);
325 bp->rx_desc_ring = NULL; 323 bp->rx_desc_ring = NULL;
326 } 324 }
327 if (bp->rx_buf_ring) { 325 kfree(bp->rx_buf_ring);
328 kfree(bp->rx_buf_ring); 326 bp->rx_buf_ring = NULL;
329 bp->rx_buf_ring = NULL;
330 }
331} 327}
332 328
333static int 329static int
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f264ff162979..8032126fd589 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4241,6 +4241,43 @@ out:
4241 return 0; 4241 return 0;
4242} 4242}
4243 4243
4244static void bond_activebackup_xmit_copy(struct sk_buff *skb,
4245 struct bonding *bond,
4246 struct slave *slave)
4247{
4248 struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
4249 struct ethhdr *eth_data;
4250 u8 *hwaddr;
4251 int res;
4252
4253 if (!skb2) {
4254 printk(KERN_ERR DRV_NAME ": Error: "
4255 "bond_activebackup_xmit_copy(): skb_copy() failed\n");
4256 return;
4257 }
4258
4259 skb2->mac.raw = (unsigned char *)skb2->data;
4260 eth_data = eth_hdr(skb2);
4261
4262 /* Pick an appropriate source MAC address
4263 * -- use slave's perm MAC addr, unless used by bond
4264 * -- otherwise, borrow active slave's perm MAC addr
4265 * since that will not be used
4266 */
4267 hwaddr = slave->perm_hwaddr;
4268 if (!memcmp(eth_data->h_source, hwaddr, ETH_ALEN))
4269 hwaddr = bond->curr_active_slave->perm_hwaddr;
4270
4271 /* Set source MAC address appropriately */
4272 memcpy(eth_data->h_source, hwaddr, ETH_ALEN);
4273
4274 res = bond_dev_queue_xmit(bond, skb2, slave->dev);
4275 if (res)
4276 dev_kfree_skb(skb2);
4277
4278 return;
4279}
4280
4244/* 4281/*
4245 * in active-backup mode, we know that bond->curr_active_slave is always valid if 4282 * in active-backup mode, we know that bond->curr_active_slave is always valid if
4246 * the bond has a usable interface. 4283 * the bond has a usable interface.
@@ -4257,10 +4294,26 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
4257 goto out; 4294 goto out;
4258 } 4295 }
4259 4296
4260 if (bond->curr_active_slave) { /* one usable interface */ 4297 if (!bond->curr_active_slave)
4261 res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev); 4298 goto out;
4299
4300 /* Xmit IGMP frames on all slaves to ensure rapid fail-over
4301 for multicast traffic on snooping switches */
4302 if (skb->protocol == __constant_htons(ETH_P_IP) &&
4303 skb->nh.iph->protocol == IPPROTO_IGMP) {
4304 struct slave *slave, *active_slave;
4305 int i;
4306
4307 active_slave = bond->curr_active_slave;
4308 bond_for_each_slave_from_to(bond, slave, i, active_slave->next,
4309 active_slave->prev)
4310 if (IS_UP(slave->dev) &&
4311 (slave->link == BOND_LINK_UP))
4312 bond_activebackup_xmit_copy(skb, bond, slave);
4262 } 4313 }
4263 4314
4315 res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
4316
4264out: 4317out:
4265 if (res) { 4318 if (res) {
4266 /* no suitable interface, frame not sent */ 4319 /* no suitable interface, frame not sent */
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 2e617424d3fb..50f43dbf31ae 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -489,7 +489,7 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
489/* local page allocation routines for the receive buffers. jumbo pages 489/* local page allocation routines for the receive buffers. jumbo pages
490 * require at least 8K contiguous and 8K aligned buffers. 490 * require at least 8K contiguous and 8K aligned buffers.
491 */ 491 */
492static cas_page_t *cas_page_alloc(struct cas *cp, const int flags) 492static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
493{ 493{
494 cas_page_t *page; 494 cas_page_t *page;
495 495
@@ -561,7 +561,7 @@ static void cas_spare_free(struct cas *cp)
561} 561}
562 562
563/* replenish spares if needed */ 563/* replenish spares if needed */
564static void cas_spare_recover(struct cas *cp, const int flags) 564static void cas_spare_recover(struct cas *cp, const gfp_t flags)
565{ 565{
566 struct list_head list, *elem, *tmp; 566 struct list_head list, *elem, *tmp;
567 int needed, i; 567 int needed, i;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index a6078ad9b654..bfdae10036ed 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -182,6 +182,10 @@ static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
182#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */ 182#define CIRRUS_DEFAULT_IRQ VH_INTC_INT_NUM_CASCADED_INTERRUPT_1 /* Event inputs bank 1 - ID 35/bit 3 */
183static unsigned int netcard_portlist[] __initdata = {CIRRUS_DEFAULT_BASE, 0}; 183static unsigned int netcard_portlist[] __initdata = {CIRRUS_DEFAULT_BASE, 0};
184static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0}; 184static unsigned int cs8900_irq_map[] = {CIRRUS_DEFAULT_IRQ, 0, 0, 0};
185#elif defined(CONFIG_MACH_MP1000)
186#include <asm/arch/mp1000-seprom.h>
187static unsigned int netcard_portlist[] __initdata = {MP1000_EIO_BASE+0x300, 0};
188static unsigned int cs8900_irq_map[] = {IRQ_EINT3,0,0,0};
185#else 189#else
186static unsigned int netcard_portlist[] __initdata = 190static unsigned int netcard_portlist[] __initdata =
187 { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0}; 191 { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
@@ -590,6 +594,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
590 cnt -= j; 594 cnt -= j;
591 } 595 }
592 } else 596 } else
597#elif defined(CONFIG_MACH_MP1000)
598 if (1) {
599 memcpy(dev->dev_addr, get_eeprom_mac_address(), ETH_ALEN);
600 } else
593#endif 601#endif
594 602
595 if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) == 603 if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
@@ -649,6 +657,10 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
649 if (1) { 657 if (1) {
650 printk(KERN_NOTICE "cs89x0: No EEPROM on HiCO.SH4\n"); 658 printk(KERN_NOTICE "cs89x0: No EEPROM on HiCO.SH4\n");
651 } else 659 } else
660#elif defined(CONFIG_MACH_MP1000)
661 if (1) {
662 lp->force |= FORCE_RJ45;
663 } else
652#endif 664#endif
653 if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0) 665 if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
654 printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n"); 666 printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
@@ -1231,7 +1243,7 @@ net_open(struct net_device *dev)
1231 else 1243 else
1232#endif 1244#endif
1233 { 1245 {
1234#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX0105) 1246#if !defined(CONFIG_ARCH_IXDP2X01) && !defined(CONFIG_ARCH_PNX0105) && !defined(CONFIG_MACH_MP1000)
1235 if (((1 << dev->irq) & lp->irq_map) == 0) { 1247 if (((1 << dev->irq) & lp->irq_map) == 0) {
1236 printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", 1248 printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
1237 dev->name, dev->irq, lp->irq_map); 1249 dev->name, dev->irq, lp->irq_map);
diff --git a/drivers/net/cs89x0.h b/drivers/net/cs89x0.h
index decea264f121..f19d1ebe0183 100644
--- a/drivers/net/cs89x0.h
+++ b/drivers/net/cs89x0.h
@@ -16,7 +16,7 @@
16 16
17#include <linux/config.h> 17#include <linux/config.h>
18 18
19#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105) 19#if defined(CONFIG_ARCH_IXDP2X01) || defined(CONFIG_ARCH_PNX0105) || defined (CONFIG_MACH_MP1000)
20/* IXDP2401/IXDP2801 uses dword-aligned register addressing */ 20/* IXDP2401/IXDP2801 uses dword-aligned register addressing */
21#define CS89x0_PORT(reg) ((reg) * 2) 21#define CS89x0_PORT(reg) ((reg) * 2)
22#else 22#else
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 521c83137bf6..f130bdab3fd3 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * adopted from sunlance.c by Richard van den Berg 6 * adopted from sunlance.c by Richard van den Berg
7 * 7 *
8 * Copyright (C) 2002, 2003 Maciej W. Rozycki 8 * Copyright (C) 2002, 2003, 2005 Maciej W. Rozycki
9 * 9 *
10 * additional sources: 10 * additional sources:
11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification, 11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
@@ -57,13 +57,15 @@
57#include <linux/string.h> 57#include <linux/string.h>
58 58
59#include <asm/addrspace.h> 59#include <asm/addrspace.h>
60#include <asm/system.h>
61
60#include <asm/dec/interrupts.h> 62#include <asm/dec/interrupts.h>
61#include <asm/dec/ioasic.h> 63#include <asm/dec/ioasic.h>
62#include <asm/dec/ioasic_addrs.h> 64#include <asm/dec/ioasic_addrs.h>
63#include <asm/dec/kn01.h> 65#include <asm/dec/kn01.h>
64#include <asm/dec/machtype.h> 66#include <asm/dec/machtype.h>
67#include <asm/dec/system.h>
65#include <asm/dec/tc.h> 68#include <asm/dec/tc.h>
66#include <asm/system.h>
67 69
68static char version[] __devinitdata = 70static char version[] __devinitdata =
69"declance.c: v0.009 by Linux MIPS DECstation task force\n"; 71"declance.c: v0.009 by Linux MIPS DECstation task force\n";
@@ -79,10 +81,6 @@ MODULE_LICENSE("GPL");
79#define PMAD_LANCE 2 81#define PMAD_LANCE 2
80#define PMAX_LANCE 3 82#define PMAX_LANCE 3
81 83
82#ifndef CONFIG_TC
83unsigned long system_base;
84unsigned long dmaptr;
85#endif
86 84
87#define LE_CSR0 0 85#define LE_CSR0 0
88#define LE_CSR1 1 86#define LE_CSR1 1
@@ -237,7 +235,7 @@ struct lance_init_block {
237/* 235/*
238 * This works *only* for the ring descriptors 236 * This works *only* for the ring descriptors
239 */ 237 */
240#define LANCE_ADDR(x) (PHYSADDR(x) >> 1) 238#define LANCE_ADDR(x) (CPHYSADDR(x) >> 1)
241 239
242struct lance_private { 240struct lance_private {
243 struct net_device *next; 241 struct net_device *next;
@@ -697,12 +695,13 @@ out:
697 spin_unlock(&lp->lock); 695 spin_unlock(&lp->lock);
698} 696}
699 697
700static void lance_dma_merr_int(const int irq, void *dev_id, 698static irqreturn_t lance_dma_merr_int(const int irq, void *dev_id,
701 struct pt_regs *regs) 699 struct pt_regs *regs)
702{ 700{
703 struct net_device *dev = (struct net_device *) dev_id; 701 struct net_device *dev = (struct net_device *) dev_id;
704 702
705 printk("%s: DMA error\n", dev->name); 703 printk("%s: DMA error\n", dev->name);
704 return IRQ_HANDLED;
706} 705}
707 706
708static irqreturn_t 707static irqreturn_t
@@ -1026,10 +1025,6 @@ static int __init dec_lance_init(const int type, const int slot)
1026 unsigned long esar_base; 1025 unsigned long esar_base;
1027 unsigned char *esar; 1026 unsigned char *esar;
1028 1027
1029#ifndef CONFIG_TC
1030 system_base = KN01_LANCE_BASE;
1031#endif
1032
1033 if (dec_lance_debug && version_printed++ == 0) 1028 if (dec_lance_debug && version_printed++ == 0)
1034 printk(version); 1029 printk(version);
1035 1030
@@ -1062,16 +1057,16 @@ static int __init dec_lance_init(const int type, const int slot)
1062 switch (type) { 1057 switch (type) {
1063#ifdef CONFIG_TC 1058#ifdef CONFIG_TC
1064 case ASIC_LANCE: 1059 case ASIC_LANCE:
1065 dev->base_addr = system_base + IOASIC_LANCE; 1060 dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
1066 1061
1067 /* buffer space for the on-board LANCE shared memory */ 1062 /* buffer space for the on-board LANCE shared memory */
1068 /* 1063 /*
1069 * FIXME: ugly hack! 1064 * FIXME: ugly hack!
1070 */ 1065 */
1071 dev->mem_start = KSEG1ADDR(0x00020000); 1066 dev->mem_start = CKSEG1ADDR(0x00020000);
1072 dev->mem_end = dev->mem_start + 0x00020000; 1067 dev->mem_end = dev->mem_start + 0x00020000;
1073 dev->irq = dec_interrupt[DEC_IRQ_LANCE]; 1068 dev->irq = dec_interrupt[DEC_IRQ_LANCE];
1074 esar_base = system_base + IOASIC_ESAR; 1069 esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
1075 1070
1076 /* Workaround crash with booting KN04 2.1k from Disk */ 1071 /* Workaround crash with booting KN04 2.1k from Disk */
1077 memset((void *)dev->mem_start, 0, 1072 memset((void *)dev->mem_start, 0,
@@ -1101,14 +1096,14 @@ static int __init dec_lance_init(const int type, const int slot)
1101 /* Setup I/O ASIC LANCE DMA. */ 1096 /* Setup I/O ASIC LANCE DMA. */
1102 lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR]; 1097 lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
1103 ioasic_write(IO_REG_LANCE_DMA_P, 1098 ioasic_write(IO_REG_LANCE_DMA_P,
1104 PHYSADDR(dev->mem_start) << 3); 1099 CPHYSADDR(dev->mem_start) << 3);
1105 1100
1106 break; 1101 break;
1107 1102
1108 case PMAD_LANCE: 1103 case PMAD_LANCE:
1109 claim_tc_card(slot); 1104 claim_tc_card(slot);
1110 1105
1111 dev->mem_start = get_tc_base_addr(slot); 1106 dev->mem_start = CKSEG1ADDR(get_tc_base_addr(slot));
1112 dev->base_addr = dev->mem_start + 0x100000; 1107 dev->base_addr = dev->mem_start + 0x100000;
1113 dev->irq = get_tc_irq_nr(slot); 1108 dev->irq = get_tc_irq_nr(slot);
1114 esar_base = dev->mem_start + 0x1c0002; 1109 esar_base = dev->mem_start + 0x1c0002;
@@ -1137,9 +1132,9 @@ static int __init dec_lance_init(const int type, const int slot)
1137 1132
1138 case PMAX_LANCE: 1133 case PMAX_LANCE:
1139 dev->irq = dec_interrupt[DEC_IRQ_LANCE]; 1134 dev->irq = dec_interrupt[DEC_IRQ_LANCE];
1140 dev->base_addr = KN01_LANCE_BASE; 1135 dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
1141 dev->mem_start = KN01_LANCE_BASE + 0x01000000; 1136 dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
1142 esar_base = KN01_RTC_BASE + 1; 1137 esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
1143 lp->dma_irq = -1; 1138 lp->dma_irq = -1;
1144 1139
1145 /* 1140 /*
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index e54fc10f6846..abce1f730d00 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1140,11 +1140,11 @@ dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, int value)
1140} 1140}
1141 1141
1142static int 1142static int
1143dm9000_drv_suspend(struct device *dev, pm_message_t state, u32 level) 1143dm9000_drv_suspend(struct device *dev, pm_message_t state)
1144{ 1144{
1145 struct net_device *ndev = dev_get_drvdata(dev); 1145 struct net_device *ndev = dev_get_drvdata(dev);
1146 1146
1147 if (ndev && level == SUSPEND_DISABLE) { 1147 if (ndev) {
1148 if (netif_running(ndev)) { 1148 if (netif_running(ndev)) {
1149 netif_device_detach(ndev); 1149 netif_device_detach(ndev);
1150 dm9000_shutdown(ndev); 1150 dm9000_shutdown(ndev);
@@ -1154,12 +1154,12 @@ dm9000_drv_suspend(struct device *dev, pm_message_t state, u32 level)
1154} 1154}
1155 1155
1156static int 1156static int
1157dm9000_drv_resume(struct device *dev, u32 level) 1157dm9000_drv_resume(struct device *dev)
1158{ 1158{
1159 struct net_device *ndev = dev_get_drvdata(dev); 1159 struct net_device *ndev = dev_get_drvdata(dev);
1160 board_info_t *db = (board_info_t *) ndev->priv; 1160 board_info_t *db = (board_info_t *) ndev->priv;
1161 1161
1162 if (ndev && level == RESUME_ENABLE) { 1162 if (ndev) {
1163 1163
1164 if (netif_running(ndev)) { 1164 if (netif_running(ndev)) {
1165 dm9000_reset(db); 1165 dm9000_reset(db);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 40887f09b681..eb169a8e8773 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2201,6 +2201,7 @@ static struct ethtool_ops e100_ethtool_ops = {
2201 .phys_id = e100_phys_id, 2201 .phys_id = e100_phys_id,
2202 .get_stats_count = e100_get_stats_count, 2202 .get_stats_count = e100_get_stats_count,
2203 .get_ethtool_stats = e100_get_ethtool_stats, 2203 .get_ethtool_stats = e100_get_ethtool_stats,
2204 .get_perm_addr = ethtool_op_get_perm_addr,
2204}; 2205};
2205 2206
2206static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2207static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
@@ -2351,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2351 e100_phy_init(nic); 2352 e100_phy_init(nic);
2352 2353
2353 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN); 2354 memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
2354 if(!is_valid_ether_addr(netdev->dev_addr)) { 2355 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2356 if(!is_valid_ether_addr(netdev->perm_addr)) {
2355 DPRINTK(PROBE, ERR, "Invalid MAC address from " 2357 DPRINTK(PROBE, ERR, "Invalid MAC address from "
2356 "EEPROM, aborting.\n"); 2358 "EEPROM, aborting.\n");
2357 err = -EAGAIN; 2359 err = -EAGAIN;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 092757bc721f..3f653a93e1bc 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -72,6 +72,10 @@
72#include <linux/mii.h> 72#include <linux/mii.h>
73#include <linux/ethtool.h> 73#include <linux/ethtool.h>
74#include <linux/if_vlan.h> 74#include <linux/if_vlan.h>
75#ifdef CONFIG_E1000_MQ
76#include <linux/cpu.h>
77#include <linux/smp.h>
78#endif
75 79
76#define BAR_0 0 80#define BAR_0 0
77#define BAR_1 1 81#define BAR_1 1
@@ -165,10 +169,33 @@ struct e1000_buffer {
165 uint16_t next_to_watch; 169 uint16_t next_to_watch;
166}; 170};
167 171
168struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; }; 172struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
169struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; }; 173struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
174
175struct e1000_tx_ring {
176 /* pointer to the descriptor ring memory */
177 void *desc;
178 /* physical address of the descriptor ring */
179 dma_addr_t dma;
180 /* length of descriptor ring in bytes */
181 unsigned int size;
182 /* number of descriptors in the ring */
183 unsigned int count;
184 /* next descriptor to associate a buffer with */
185 unsigned int next_to_use;
186 /* next descriptor to check for DD status bit */
187 unsigned int next_to_clean;
188 /* array of buffer information structs */
189 struct e1000_buffer *buffer_info;
190
191 struct e1000_buffer previous_buffer_info;
192 spinlock_t tx_lock;
193 uint16_t tdh;
194 uint16_t tdt;
195 uint64_t pkt;
196};
170 197
171struct e1000_desc_ring { 198struct e1000_rx_ring {
172 /* pointer to the descriptor ring memory */ 199 /* pointer to the descriptor ring memory */
173 void *desc; 200 void *desc;
174 /* physical address of the descriptor ring */ 201 /* physical address of the descriptor ring */
@@ -186,6 +213,10 @@ struct e1000_desc_ring {
186 /* arrays of page information for packet split */ 213 /* arrays of page information for packet split */
187 struct e1000_ps_page *ps_page; 214 struct e1000_ps_page *ps_page;
188 struct e1000_ps_page_dma *ps_page_dma; 215 struct e1000_ps_page_dma *ps_page_dma;
216
217 uint16_t rdh;
218 uint16_t rdt;
219 uint64_t pkt;
189}; 220};
190 221
191#define E1000_DESC_UNUSED(R) \ 222#define E1000_DESC_UNUSED(R) \
@@ -227,9 +258,10 @@ struct e1000_adapter {
227 unsigned long led_status; 258 unsigned long led_status;
228 259
229 /* TX */ 260 /* TX */
230 struct e1000_desc_ring tx_ring; 261 struct e1000_tx_ring *tx_ring; /* One per active queue */
231 struct e1000_buffer previous_buffer_info; 262#ifdef CONFIG_E1000_MQ
232 spinlock_t tx_lock; 263 struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
264#endif
233 uint32_t txd_cmd; 265 uint32_t txd_cmd;
234 uint32_t tx_int_delay; 266 uint32_t tx_int_delay;
235 uint32_t tx_abs_int_delay; 267 uint32_t tx_abs_int_delay;
@@ -246,19 +278,33 @@ struct e1000_adapter {
246 278
247 /* RX */ 279 /* RX */
248#ifdef CONFIG_E1000_NAPI 280#ifdef CONFIG_E1000_NAPI
249 boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done, 281 boolean_t (*clean_rx) (struct e1000_adapter *adapter,
250 int work_to_do); 282 struct e1000_rx_ring *rx_ring,
283 int *work_done, int work_to_do);
251#else 284#else
252 boolean_t (*clean_rx) (struct e1000_adapter *adapter); 285 boolean_t (*clean_rx) (struct e1000_adapter *adapter,
286 struct e1000_rx_ring *rx_ring);
253#endif 287#endif
254 void (*alloc_rx_buf) (struct e1000_adapter *adapter); 288 void (*alloc_rx_buf) (struct e1000_adapter *adapter,
255 struct e1000_desc_ring rx_ring; 289 struct e1000_rx_ring *rx_ring);
290 struct e1000_rx_ring *rx_ring; /* One per active queue */
291#ifdef CONFIG_E1000_NAPI
292 struct net_device *polling_netdev; /* One per active queue */
293#endif
294#ifdef CONFIG_E1000_MQ
295 struct net_device **cpu_netdev; /* per-cpu */
296 struct call_async_data_struct rx_sched_call_data;
297 int cpu_for_queue[4];
298#endif
299 int num_queues;
300
256 uint64_t hw_csum_err; 301 uint64_t hw_csum_err;
257 uint64_t hw_csum_good; 302 uint64_t hw_csum_good;
303 uint64_t rx_hdr_split;
258 uint32_t rx_int_delay; 304 uint32_t rx_int_delay;
259 uint32_t rx_abs_int_delay; 305 uint32_t rx_abs_int_delay;
260 boolean_t rx_csum; 306 boolean_t rx_csum;
261 boolean_t rx_ps; 307 unsigned int rx_ps_pages;
262 uint32_t gorcl; 308 uint32_t gorcl;
263 uint64_t gorcl_old; 309 uint64_t gorcl_old;
264 uint16_t rx_ps_bsize0; 310 uint16_t rx_ps_bsize0;
@@ -278,8 +324,8 @@ struct e1000_adapter {
278 struct e1000_phy_stats phy_stats; 324 struct e1000_phy_stats phy_stats;
279 325
280 uint32_t test_icr; 326 uint32_t test_icr;
281 struct e1000_desc_ring test_tx_ring; 327 struct e1000_tx_ring test_tx_ring;
282 struct e1000_desc_ring test_rx_ring; 328 struct e1000_rx_ring test_rx_ring;
283 329
284 330
285 int msg_enable; 331 int msg_enable;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index f133ff0b0b94..9c7feaeaa6a4 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -39,10 +39,10 @@ extern int e1000_up(struct e1000_adapter *adapter);
39extern void e1000_down(struct e1000_adapter *adapter); 39extern void e1000_down(struct e1000_adapter *adapter);
40extern void e1000_reset(struct e1000_adapter *adapter); 40extern void e1000_reset(struct e1000_adapter *adapter);
41extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); 41extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
42extern int e1000_setup_rx_resources(struct e1000_adapter *adapter); 42extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
43extern int e1000_setup_tx_resources(struct e1000_adapter *adapter); 43extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
44extern void e1000_free_rx_resources(struct e1000_adapter *adapter); 44extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
45extern void e1000_free_tx_resources(struct e1000_adapter *adapter); 45extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
46extern void e1000_update_stats(struct e1000_adapter *adapter); 46extern void e1000_update_stats(struct e1000_adapter *adapter);
47 47
48struct e1000_stats { 48struct e1000_stats {
@@ -91,7 +91,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
91 { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, 91 { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
92 { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, 92 { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
93 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, 93 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
94 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) } 94 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
95 { "rx_header_split", E1000_STAT(rx_hdr_split) },
95}; 96};
96#define E1000_STATS_LEN \ 97#define E1000_STATS_LEN \
97 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats) 98 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
@@ -546,8 +547,10 @@ e1000_set_eeprom(struct net_device *netdev,
546 ret_val = e1000_write_eeprom(hw, first_word, 547 ret_val = e1000_write_eeprom(hw, first_word,
547 last_word - first_word + 1, eeprom_buff); 548 last_word - first_word + 1, eeprom_buff);
548 549
549 /* Update the checksum over the first part of the EEPROM if needed */ 550 /* Update the checksum over the first part of the EEPROM if needed
550 if((ret_val == 0) && first_word <= EEPROM_CHECKSUM_REG) 551 * and flush shadow RAM for 82573 conrollers */
552 if((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
553 (hw->mac_type == e1000_82573)))
551 e1000_update_eeprom_checksum(hw); 554 e1000_update_eeprom_checksum(hw);
552 555
553 kfree(eeprom_buff); 556 kfree(eeprom_buff);
@@ -576,8 +579,8 @@ e1000_get_ringparam(struct net_device *netdev,
576{ 579{
577 struct e1000_adapter *adapter = netdev_priv(netdev); 580 struct e1000_adapter *adapter = netdev_priv(netdev);
578 e1000_mac_type mac_type = adapter->hw.mac_type; 581 e1000_mac_type mac_type = adapter->hw.mac_type;
579 struct e1000_desc_ring *txdr = &adapter->tx_ring; 582 struct e1000_tx_ring *txdr = adapter->tx_ring;
580 struct e1000_desc_ring *rxdr = &adapter->rx_ring; 583 struct e1000_rx_ring *rxdr = adapter->rx_ring;
581 584
582 ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : 585 ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
583 E1000_MAX_82544_RXD; 586 E1000_MAX_82544_RXD;
@@ -597,20 +600,40 @@ e1000_set_ringparam(struct net_device *netdev,
597{ 600{
598 struct e1000_adapter *adapter = netdev_priv(netdev); 601 struct e1000_adapter *adapter = netdev_priv(netdev);
599 e1000_mac_type mac_type = adapter->hw.mac_type; 602 e1000_mac_type mac_type = adapter->hw.mac_type;
600 struct e1000_desc_ring *txdr = &adapter->tx_ring; 603 struct e1000_tx_ring *txdr, *tx_old, *tx_new;
601 struct e1000_desc_ring *rxdr = &adapter->rx_ring; 604 struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
602 struct e1000_desc_ring tx_old, tx_new, rx_old, rx_new; 605 int i, err, tx_ring_size, rx_ring_size;
603 int err; 606
607 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
608 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
609
610 if (netif_running(adapter->netdev))
611 e1000_down(adapter);
604 612
605 tx_old = adapter->tx_ring; 613 tx_old = adapter->tx_ring;
606 rx_old = adapter->rx_ring; 614 rx_old = adapter->rx_ring;
607 615
616 adapter->tx_ring = kmalloc(tx_ring_size, GFP_KERNEL);
617 if (!adapter->tx_ring) {
618 err = -ENOMEM;
619 goto err_setup_rx;
620 }
621 memset(adapter->tx_ring, 0, tx_ring_size);
622
623 adapter->rx_ring = kmalloc(rx_ring_size, GFP_KERNEL);
624 if (!adapter->rx_ring) {
625 kfree(adapter->tx_ring);
626 err = -ENOMEM;
627 goto err_setup_rx;
628 }
629 memset(adapter->rx_ring, 0, rx_ring_size);
630
631 txdr = adapter->tx_ring;
632 rxdr = adapter->rx_ring;
633
608 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 634 if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
609 return -EINVAL; 635 return -EINVAL;
610 636
611 if(netif_running(adapter->netdev))
612 e1000_down(adapter);
613
614 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); 637 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
615 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? 638 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
616 E1000_MAX_RXD : E1000_MAX_82544_RXD)); 639 E1000_MAX_RXD : E1000_MAX_82544_RXD));
@@ -621,11 +644,16 @@ e1000_set_ringparam(struct net_device *netdev,
621 E1000_MAX_TXD : E1000_MAX_82544_TXD)); 644 E1000_MAX_TXD : E1000_MAX_82544_TXD));
622 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 645 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
623 646
647 for (i = 0; i < adapter->num_queues; i++) {
648 txdr[i].count = txdr->count;
649 rxdr[i].count = rxdr->count;
650 }
651
624 if(netif_running(adapter->netdev)) { 652 if(netif_running(adapter->netdev)) {
625 /* Try to get new resources before deleting old */ 653 /* Try to get new resources before deleting old */
626 if((err = e1000_setup_rx_resources(adapter))) 654 if ((err = e1000_setup_all_rx_resources(adapter)))
627 goto err_setup_rx; 655 goto err_setup_rx;
628 if((err = e1000_setup_tx_resources(adapter))) 656 if ((err = e1000_setup_all_tx_resources(adapter)))
629 goto err_setup_tx; 657 goto err_setup_tx;
630 658
631 /* save the new, restore the old in order to free it, 659 /* save the new, restore the old in order to free it,
@@ -635,8 +663,10 @@ e1000_set_ringparam(struct net_device *netdev,
635 tx_new = adapter->tx_ring; 663 tx_new = adapter->tx_ring;
636 adapter->rx_ring = rx_old; 664 adapter->rx_ring = rx_old;
637 adapter->tx_ring = tx_old; 665 adapter->tx_ring = tx_old;
638 e1000_free_rx_resources(adapter); 666 e1000_free_all_rx_resources(adapter);
639 e1000_free_tx_resources(adapter); 667 e1000_free_all_tx_resources(adapter);
668 kfree(tx_old);
669 kfree(rx_old);
640 adapter->rx_ring = rx_new; 670 adapter->rx_ring = rx_new;
641 adapter->tx_ring = tx_new; 671 adapter->tx_ring = tx_new;
642 if((err = e1000_up(adapter))) 672 if((err = e1000_up(adapter)))
@@ -645,7 +675,7 @@ e1000_set_ringparam(struct net_device *netdev,
645 675
646 return 0; 676 return 0;
647err_setup_tx: 677err_setup_tx:
648 e1000_free_rx_resources(adapter); 678 e1000_free_all_rx_resources(adapter);
649err_setup_rx: 679err_setup_rx:
650 adapter->rx_ring = rx_old; 680 adapter->rx_ring = rx_old;
651 adapter->tx_ring = tx_old; 681 adapter->tx_ring = tx_old;
@@ -696,6 +726,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
696 * Some bits that get toggled are ignored. 726 * Some bits that get toggled are ignored.
697 */ 727 */
698 switch (adapter->hw.mac_type) { 728 switch (adapter->hw.mac_type) {
729 /* there are several bits on newer hardware that are r/w */
730 case e1000_82571:
731 case e1000_82572:
732 toggle = 0x7FFFF3FF;
733 break;
699 case e1000_82573: 734 case e1000_82573:
700 toggle = 0x7FFFF033; 735 toggle = 0x7FFFF033;
701 break; 736 break;
@@ -898,8 +933,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
898static void 933static void
899e1000_free_desc_rings(struct e1000_adapter *adapter) 934e1000_free_desc_rings(struct e1000_adapter *adapter)
900{ 935{
901 struct e1000_desc_ring *txdr = &adapter->test_tx_ring; 936 struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
902 struct e1000_desc_ring *rxdr = &adapter->test_rx_ring; 937 struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
903 struct pci_dev *pdev = adapter->pdev; 938 struct pci_dev *pdev = adapter->pdev;
904 int i; 939 int i;
905 940
@@ -930,19 +965,16 @@ e1000_free_desc_rings(struct e1000_adapter *adapter)
930 if(rxdr->desc) 965 if(rxdr->desc)
931 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); 966 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
932 967
933 if(txdr->buffer_info) 968 kfree(txdr->buffer_info);
934 kfree(txdr->buffer_info); 969 kfree(rxdr->buffer_info);
935 if(rxdr->buffer_info)
936 kfree(rxdr->buffer_info);
937
938 return; 970 return;
939} 971}
940 972
941static int 973static int
942e1000_setup_desc_rings(struct e1000_adapter *adapter) 974e1000_setup_desc_rings(struct e1000_adapter *adapter)
943{ 975{
944 struct e1000_desc_ring *txdr = &adapter->test_tx_ring; 976 struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
945 struct e1000_desc_ring *rxdr = &adapter->test_rx_ring; 977 struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
946 struct pci_dev *pdev = adapter->pdev; 978 struct pci_dev *pdev = adapter->pdev;
947 uint32_t rctl; 979 uint32_t rctl;
948 int size, i, ret_val; 980 int size, i, ret_val;
@@ -1245,6 +1277,8 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
1245 case e1000_82541_rev_2: 1277 case e1000_82541_rev_2:
1246 case e1000_82547: 1278 case e1000_82547:
1247 case e1000_82547_rev_2: 1279 case e1000_82547_rev_2:
1280 case e1000_82571:
1281 case e1000_82572:
1248 case e1000_82573: 1282 case e1000_82573:
1249 return e1000_integrated_phy_loopback(adapter); 1283 return e1000_integrated_phy_loopback(adapter);
1250 break; 1284 break;
@@ -1340,8 +1374,8 @@ e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1340static int 1374static int
1341e1000_run_loopback_test(struct e1000_adapter *adapter) 1375e1000_run_loopback_test(struct e1000_adapter *adapter)
1342{ 1376{
1343 struct e1000_desc_ring *txdr = &adapter->test_tx_ring; 1377 struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
1344 struct e1000_desc_ring *rxdr = &adapter->test_rx_ring; 1378 struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
1345 struct pci_dev *pdev = adapter->pdev; 1379 struct pci_dev *pdev = adapter->pdev;
1346 int i, j, k, l, lc, good_cnt, ret_val=0; 1380 int i, j, k, l, lc, good_cnt, ret_val=0;
1347 unsigned long time; 1381 unsigned long time;
@@ -1509,6 +1543,7 @@ e1000_diag_test(struct net_device *netdev,
1509 data[2] = 0; 1543 data[2] = 0;
1510 data[3] = 0; 1544 data[3] = 0;
1511 } 1545 }
1546 msleep_interruptible(4 * 1000);
1512} 1547}
1513 1548
1514static void 1549static void
@@ -1625,7 +1660,7 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
1625 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ)) 1660 if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
1626 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ); 1661 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
1627 1662
1628 if(adapter->hw.mac_type < e1000_82573) { 1663 if(adapter->hw.mac_type < e1000_82571) {
1629 if(!adapter->blink_timer.function) { 1664 if(!adapter->blink_timer.function) {
1630 init_timer(&adapter->blink_timer); 1665 init_timer(&adapter->blink_timer);
1631 adapter->blink_timer.function = e1000_led_blink_callback; 1666 adapter->blink_timer.function = e1000_led_blink_callback;
@@ -1739,6 +1774,7 @@ struct ethtool_ops e1000_ethtool_ops = {
1739 .phys_id = e1000_phys_id, 1774 .phys_id = e1000_phys_id,
1740 .get_stats_count = e1000_get_stats_count, 1775 .get_stats_count = e1000_get_stats_count,
1741 .get_ethtool_stats = e1000_get_ethtool_stats, 1776 .get_ethtool_stats = e1000_get_ethtool_stats,
1777 .get_perm_addr = ethtool_op_get_perm_addr,
1742}; 1778};
1743 1779
1744void e1000_set_ethtool_ops(struct net_device *netdev) 1780void e1000_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 045f5426ab9a..8fc876da43b4 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -83,14 +83,14 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
83 83
84static const 84static const
85uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = 85uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
86 { 8, 13, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 86 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
87 22, 24, 27, 30, 32, 35, 37, 40, 42, 44, 47, 49, 51, 54, 56, 58, 87 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
88 32, 35, 38, 41, 44, 47, 50, 53, 55, 58, 61, 63, 66, 69, 71, 74, 88 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
89 43, 47, 51, 54, 58, 61, 64, 67, 71, 74, 77, 80, 82, 85, 88, 90, 89 21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
90 57, 62, 66, 70, 74, 77, 81, 85, 88, 91, 94, 97, 100, 103, 106, 108, 90 40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
91 73, 78, 82, 87, 91, 95, 98, 102, 105, 109, 112, 114, 117, 119, 122, 124, 91 60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
92 91, 96, 101, 105, 109, 113, 116, 119, 122, 125, 127, 128, 128, 128, 128, 128, 92 83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
93 108, 113, 117, 121, 124, 127, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}; 93 104, 109, 114, 118, 121, 124};
94 94
95 95
96/****************************************************************************** 96/******************************************************************************
@@ -286,7 +286,6 @@ e1000_set_mac_type(struct e1000_hw *hw)
286 case E1000_DEV_ID_82546GB_FIBER: 286 case E1000_DEV_ID_82546GB_FIBER:
287 case E1000_DEV_ID_82546GB_SERDES: 287 case E1000_DEV_ID_82546GB_SERDES:
288 case E1000_DEV_ID_82546GB_PCIE: 288 case E1000_DEV_ID_82546GB_PCIE:
289 case E1000_DEV_ID_82546GB_QUAD_COPPER:
290 hw->mac_type = e1000_82546_rev_3; 289 hw->mac_type = e1000_82546_rev_3;
291 break; 290 break;
292 case E1000_DEV_ID_82541EI: 291 case E1000_DEV_ID_82541EI:
@@ -305,8 +304,19 @@ e1000_set_mac_type(struct e1000_hw *hw)
305 case E1000_DEV_ID_82547GI: 304 case E1000_DEV_ID_82547GI:
306 hw->mac_type = e1000_82547_rev_2; 305 hw->mac_type = e1000_82547_rev_2;
307 break; 306 break;
307 case E1000_DEV_ID_82571EB_COPPER:
308 case E1000_DEV_ID_82571EB_FIBER:
309 case E1000_DEV_ID_82571EB_SERDES:
310 hw->mac_type = e1000_82571;
311 break;
312 case E1000_DEV_ID_82572EI_COPPER:
313 case E1000_DEV_ID_82572EI_FIBER:
314 case E1000_DEV_ID_82572EI_SERDES:
315 hw->mac_type = e1000_82572;
316 break;
308 case E1000_DEV_ID_82573E: 317 case E1000_DEV_ID_82573E:
309 case E1000_DEV_ID_82573E_IAMT: 318 case E1000_DEV_ID_82573E_IAMT:
319 case E1000_DEV_ID_82573L:
310 hw->mac_type = e1000_82573; 320 hw->mac_type = e1000_82573;
311 break; 321 break;
312 default: 322 default:
@@ -315,6 +325,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
315 } 325 }
316 326
317 switch(hw->mac_type) { 327 switch(hw->mac_type) {
328 case e1000_82571:
329 case e1000_82572:
318 case e1000_82573: 330 case e1000_82573:
319 hw->eeprom_semaphore_present = TRUE; 331 hw->eeprom_semaphore_present = TRUE;
320 /* fall through */ 332 /* fall through */
@@ -351,6 +363,8 @@ e1000_set_media_type(struct e1000_hw *hw)
351 switch (hw->device_id) { 363 switch (hw->device_id) {
352 case E1000_DEV_ID_82545GM_SERDES: 364 case E1000_DEV_ID_82545GM_SERDES:
353 case E1000_DEV_ID_82546GB_SERDES: 365 case E1000_DEV_ID_82546GB_SERDES:
366 case E1000_DEV_ID_82571EB_SERDES:
367 case E1000_DEV_ID_82572EI_SERDES:
354 hw->media_type = e1000_media_type_internal_serdes; 368 hw->media_type = e1000_media_type_internal_serdes;
355 break; 369 break;
356 default: 370 default:
@@ -523,6 +537,8 @@ e1000_reset_hw(struct e1000_hw *hw)
523 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); 537 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
524 E1000_WRITE_FLUSH(hw); 538 E1000_WRITE_FLUSH(hw);
525 /* fall through */ 539 /* fall through */
540 case e1000_82571:
541 case e1000_82572:
526 ret_val = e1000_get_auto_rd_done(hw); 542 ret_val = e1000_get_auto_rd_done(hw);
527 if(ret_val) 543 if(ret_val)
528 /* We don't want to continue accessing MAC registers. */ 544 /* We don't want to continue accessing MAC registers. */
@@ -683,6 +699,9 @@ e1000_init_hw(struct e1000_hw *hw)
683 switch (hw->mac_type) { 699 switch (hw->mac_type) {
684 default: 700 default:
685 break; 701 break;
702 case e1000_82571:
703 case e1000_82572:
704 ctrl |= (1 << 22);
686 case e1000_82573: 705 case e1000_82573:
687 ctrl |= E1000_TXDCTL_COUNT_DESC; 706 ctrl |= E1000_TXDCTL_COUNT_DESC;
688 break; 707 break;
@@ -694,6 +713,26 @@ e1000_init_hw(struct e1000_hw *hw)
694 e1000_enable_tx_pkt_filtering(hw); 713 e1000_enable_tx_pkt_filtering(hw);
695 } 714 }
696 715
716 switch (hw->mac_type) {
717 default:
718 break;
719 case e1000_82571:
720 case e1000_82572:
721 ctrl = E1000_READ_REG(hw, TXDCTL1);
722 ctrl &= ~E1000_TXDCTL_WTHRESH;
723 ctrl |= E1000_TXDCTL_COUNT_DESC | E1000_TXDCTL_FULL_TX_DESC_WB;
724 ctrl |= (1 << 22);
725 E1000_WRITE_REG(hw, TXDCTL1, ctrl);
726 break;
727 }
728
729
730
731 if (hw->mac_type == e1000_82573) {
732 uint32_t gcr = E1000_READ_REG(hw, GCR);
733 gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
734 E1000_WRITE_REG(hw, GCR, gcr);
735 }
697 736
698 /* Clear all of the statistics registers (clear on read). It is 737 /* Clear all of the statistics registers (clear on read). It is
699 * important that we do this after we have tried to establish link 738 * important that we do this after we have tried to establish link
@@ -878,6 +917,14 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
878 917
879 DEBUGFUNC("e1000_setup_fiber_serdes_link"); 918 DEBUGFUNC("e1000_setup_fiber_serdes_link");
880 919
920 /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
921 * until explicitly turned off or a power cycle is performed. A read to
922 * the register does not indicate its status. Therefore, we ensure
923 * loopback mode is disabled during initialization.
924 */
925 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
926 E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK);
927
881 /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be 928 /* On adapters with a MAC newer than 82544, SW Defineable pin 1 will be
882 * set when the optics detect a signal. On older adapters, it will be 929 * set when the optics detect a signal. On older adapters, it will be
883 * cleared when there is a signal. This applies to fiber media only. 930 * cleared when there is a signal. This applies to fiber media only.
@@ -2943,6 +2990,8 @@ e1000_phy_reset(struct e1000_hw *hw)
2943 2990
2944 switch (hw->mac_type) { 2991 switch (hw->mac_type) {
2945 case e1000_82541_rev_2: 2992 case e1000_82541_rev_2:
2993 case e1000_82571:
2994 case e1000_82572:
2946 ret_val = e1000_phy_hw_reset(hw); 2995 ret_val = e1000_phy_hw_reset(hw);
2947 if(ret_val) 2996 if(ret_val)
2948 return ret_val; 2997 return ret_val;
@@ -2981,6 +3030,16 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
2981 3030
2982 DEBUGFUNC("e1000_detect_gig_phy"); 3031 DEBUGFUNC("e1000_detect_gig_phy");
2983 3032
3033 /* The 82571 firmware may still be configuring the PHY. In this
3034 * case, we cannot access the PHY until the configuration is done. So
3035 * we explicitly set the PHY values. */
3036 if(hw->mac_type == e1000_82571 ||
3037 hw->mac_type == e1000_82572) {
3038 hw->phy_id = IGP01E1000_I_PHY_ID;
3039 hw->phy_type = e1000_phy_igp_2;
3040 return E1000_SUCCESS;
3041 }
3042
2984 /* Read the PHY ID Registers to identify which PHY is onboard. */ 3043 /* Read the PHY ID Registers to identify which PHY is onboard. */
2985 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); 3044 ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
2986 if(ret_val) 3045 if(ret_val)
@@ -3334,6 +3393,21 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
3334 eeprom->use_eerd = FALSE; 3393 eeprom->use_eerd = FALSE;
3335 eeprom->use_eewr = FALSE; 3394 eeprom->use_eewr = FALSE;
3336 break; 3395 break;
3396 case e1000_82571:
3397 case e1000_82572:
3398 eeprom->type = e1000_eeprom_spi;
3399 eeprom->opcode_bits = 8;
3400 eeprom->delay_usec = 1;
3401 if (eecd & E1000_EECD_ADDR_BITS) {
3402 eeprom->page_size = 32;
3403 eeprom->address_bits = 16;
3404 } else {
3405 eeprom->page_size = 8;
3406 eeprom->address_bits = 8;
3407 }
3408 eeprom->use_eerd = FALSE;
3409 eeprom->use_eewr = FALSE;
3410 break;
3337 case e1000_82573: 3411 case e1000_82573:
3338 eeprom->type = e1000_eeprom_spi; 3412 eeprom->type = e1000_eeprom_spi;
3339 eeprom->opcode_bits = 8; 3413 eeprom->opcode_bits = 8;
@@ -3543,25 +3617,26 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
3543 eecd = E1000_READ_REG(hw, EECD); 3617 eecd = E1000_READ_REG(hw, EECD);
3544 3618
3545 if (hw->mac_type != e1000_82573) { 3619 if (hw->mac_type != e1000_82573) {
3546 /* Request EEPROM Access */ 3620 /* Request EEPROM Access */
3547 if(hw->mac_type > e1000_82544) { 3621 if(hw->mac_type > e1000_82544) {
3548 eecd |= E1000_EECD_REQ; 3622 eecd |= E1000_EECD_REQ;
3549 E1000_WRITE_REG(hw, EECD, eecd);
3550 eecd = E1000_READ_REG(hw, EECD);
3551 while((!(eecd & E1000_EECD_GNT)) &&
3552 (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
3553 i++;
3554 udelay(5);
3555 eecd = E1000_READ_REG(hw, EECD);
3556 }
3557 if(!(eecd & E1000_EECD_GNT)) {
3558 eecd &= ~E1000_EECD_REQ;
3559 E1000_WRITE_REG(hw, EECD, eecd); 3623 E1000_WRITE_REG(hw, EECD, eecd);
3560 DEBUGOUT("Could not acquire EEPROM grant\n"); 3624 eecd = E1000_READ_REG(hw, EECD);
3561 return -E1000_ERR_EEPROM; 3625 while((!(eecd & E1000_EECD_GNT)) &&
3626 (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
3627 i++;
3628 udelay(5);
3629 eecd = E1000_READ_REG(hw, EECD);
3630 }
3631 if(!(eecd & E1000_EECD_GNT)) {
3632 eecd &= ~E1000_EECD_REQ;
3633 E1000_WRITE_REG(hw, EECD, eecd);
3634 DEBUGOUT("Could not acquire EEPROM grant\n");
3635 e1000_put_hw_eeprom_semaphore(hw);
3636 return -E1000_ERR_EEPROM;
3637 }
3562 } 3638 }
3563 } 3639 }
3564 }
3565 3640
3566 /* Setup EEPROM for Read/Write */ 3641 /* Setup EEPROM for Read/Write */
3567 3642
@@ -4064,7 +4139,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
4064 return -E1000_ERR_EEPROM; 4139 return -E1000_ERR_EEPROM;
4065 } 4140 }
4066 4141
4067 /* 82573 reads only through eerd */ 4142 /* 82573 writes only through eewr */
4068 if(eeprom->use_eewr == TRUE) 4143 if(eeprom->use_eewr == TRUE)
4069 return e1000_write_eeprom_eewr(hw, offset, words, data); 4144 return e1000_write_eeprom_eewr(hw, offset, words, data);
4070 4145
@@ -4353,9 +4428,16 @@ e1000_read_mac_addr(struct e1000_hw * hw)
4353 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); 4428 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
4354 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); 4429 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
4355 } 4430 }
4356 if(((hw->mac_type == e1000_82546) || (hw->mac_type == e1000_82546_rev_3)) && 4431 switch (hw->mac_type) {
4357 (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) 4432 default:
4433 break;
4434 case e1000_82546:
4435 case e1000_82546_rev_3:
4436 case e1000_82571:
4437 if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
4358 hw->perm_mac_addr[5] ^= 0x01; 4438 hw->perm_mac_addr[5] ^= 0x01;
4439 break;
4440 }
4359 4441
4360 for(i = 0; i < NODE_ADDRESS_SIZE; i++) 4442 for(i = 0; i < NODE_ADDRESS_SIZE; i++)
4361 hw->mac_addr[i] = hw->perm_mac_addr[i]; 4443 hw->mac_addr[i] = hw->perm_mac_addr[i];
@@ -4385,6 +4467,12 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
4385 e1000_rar_set(hw, hw->mac_addr, 0); 4467 e1000_rar_set(hw, hw->mac_addr, 0);
4386 4468
4387 rar_num = E1000_RAR_ENTRIES; 4469 rar_num = E1000_RAR_ENTRIES;
4470
4471 /* Reserve a spot for the Locally Administered Address to work around
4472 * an 82571 issue in which a reset on one port will reload the MAC on
4473 * the other port. */
4474 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
4475 rar_num -= 1;
4388 /* Zero out the other 15 receive addresses. */ 4476 /* Zero out the other 15 receive addresses. */
4389 DEBUGOUT("Clearing RAR[1-15]\n"); 4477 DEBUGOUT("Clearing RAR[1-15]\n");
4390 for(i = 1; i < rar_num; i++) { 4478 for(i = 1; i < rar_num; i++) {
@@ -4427,6 +4515,12 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
4427 /* Clear RAR[1-15] */ 4515 /* Clear RAR[1-15] */
4428 DEBUGOUT(" Clearing RAR[1-15]\n"); 4516 DEBUGOUT(" Clearing RAR[1-15]\n");
4429 num_rar_entry = E1000_RAR_ENTRIES; 4517 num_rar_entry = E1000_RAR_ENTRIES;
4518 /* Reserve a spot for the Locally Administered Address to work around
4519 * an 82571 issue in which a reset on one port will reload the MAC on
4520 * the other port. */
4521 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
4522 num_rar_entry -= 1;
4523
4430 for(i = rar_used_count; i < num_rar_entry; i++) { 4524 for(i = rar_used_count; i < num_rar_entry; i++) {
4431 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 4525 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
4432 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 4526 E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -4984,7 +5078,6 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
4984 temp = E1000_READ_REG(hw, ICTXQEC); 5078 temp = E1000_READ_REG(hw, ICTXQEC);
4985 temp = E1000_READ_REG(hw, ICTXQMTC); 5079 temp = E1000_READ_REG(hw, ICTXQMTC);
4986 temp = E1000_READ_REG(hw, ICRXDMTC); 5080 temp = E1000_READ_REG(hw, ICRXDMTC);
4987
4988} 5081}
4989 5082
4990/****************************************************************************** 5083/******************************************************************************
@@ -5151,6 +5244,8 @@ e1000_get_bus_info(struct e1000_hw *hw)
5151 hw->bus_speed = e1000_bus_speed_unknown; 5244 hw->bus_speed = e1000_bus_speed_unknown;
5152 hw->bus_width = e1000_bus_width_unknown; 5245 hw->bus_width = e1000_bus_width_unknown;
5153 break; 5246 break;
5247 case e1000_82571:
5248 case e1000_82572:
5154 case e1000_82573: 5249 case e1000_82573:
5155 hw->bus_type = e1000_bus_type_pci_express; 5250 hw->bus_type = e1000_bus_type_pci_express;
5156 hw->bus_speed = e1000_bus_speed_2500; 5251 hw->bus_speed = e1000_bus_speed_2500;
@@ -5250,6 +5345,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
5250 int32_t ret_val; 5345 int32_t ret_val;
5251 uint16_t agc_value = 0; 5346 uint16_t agc_value = 0;
5252 uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE; 5347 uint16_t cur_agc, min_agc = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
5348 uint16_t max_agc = 0;
5253 uint16_t i, phy_data; 5349 uint16_t i, phy_data;
5254 uint16_t cable_length; 5350 uint16_t cable_length;
5255 5351
@@ -5338,6 +5434,40 @@ e1000_get_cable_length(struct e1000_hw *hw,
5338 IGP01E1000_AGC_RANGE) : 0; 5434 IGP01E1000_AGC_RANGE) : 0;
5339 *max_length = e1000_igp_cable_length_table[agc_value] + 5435 *max_length = e1000_igp_cable_length_table[agc_value] +
5340 IGP01E1000_AGC_RANGE; 5436 IGP01E1000_AGC_RANGE;
5437 } else if (hw->phy_type == e1000_phy_igp_2) {
5438 uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
5439 {IGP02E1000_PHY_AGC_A,
5440 IGP02E1000_PHY_AGC_B,
5441 IGP02E1000_PHY_AGC_C,
5442 IGP02E1000_PHY_AGC_D};
5443 /* Read the AGC registers for all channels */
5444 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
5445 ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
5446 if (ret_val)
5447 return ret_val;
5448
5449 /* Getting bits 15:9, which represent the combination of course and
5450 * fine gain values. The result is a number that can be put into
5451 * the lookup table to obtain the approximate cable length. */
5452 cur_agc = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
5453 IGP02E1000_AGC_LENGTH_MASK;
5454
5455 /* Remove min & max AGC values from calculation. */
5456 if (e1000_igp_2_cable_length_table[min_agc] > e1000_igp_2_cable_length_table[cur_agc])
5457 min_agc = cur_agc;
5458 if (e1000_igp_2_cable_length_table[max_agc] < e1000_igp_2_cable_length_table[cur_agc])
5459 max_agc = cur_agc;
5460
5461 agc_value += e1000_igp_2_cable_length_table[cur_agc];
5462 }
5463
5464 agc_value -= (e1000_igp_2_cable_length_table[min_agc] + e1000_igp_2_cable_length_table[max_agc]);
5465 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
5466
5467 /* Calculate cable length with the error range of +/- 10 meters. */
5468 *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
5469 (agc_value - IGP02E1000_AGC_RANGE) : 0;
5470 *max_length = agc_value + IGP02E1000_AGC_RANGE;
5341 } 5471 }
5342 5472
5343 return E1000_SUCCESS; 5473 return E1000_SUCCESS;
@@ -6465,6 +6595,8 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
6465 default: 6595 default:
6466 msec_delay(5); 6596 msec_delay(5);
6467 break; 6597 break;
6598 case e1000_82571:
6599 case e1000_82572:
6468 case e1000_82573: 6600 case e1000_82573:
6469 while(timeout) { 6601 while(timeout) {
6470 if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break; 6602 if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD) break;
@@ -6494,10 +6626,31 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
6494int32_t 6626int32_t
6495e1000_get_phy_cfg_done(struct e1000_hw *hw) 6627e1000_get_phy_cfg_done(struct e1000_hw *hw)
6496{ 6628{
6629 int32_t timeout = PHY_CFG_TIMEOUT;
6630 uint32_t cfg_mask = E1000_EEPROM_CFG_DONE;
6631
6497 DEBUGFUNC("e1000_get_phy_cfg_done"); 6632 DEBUGFUNC("e1000_get_phy_cfg_done");
6498 6633
6499 /* Simply wait for 10ms */ 6634 switch (hw->mac_type) {
6500 msec_delay(10); 6635 default:
6636 msec_delay(10);
6637 break;
6638 case e1000_82571:
6639 case e1000_82572:
6640 while (timeout) {
6641 if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask)
6642 break;
6643 else
6644 msec_delay(1);
6645 timeout--;
6646 }
6647
6648 if (!timeout) {
6649 DEBUGOUT("MNG configuration cycle has not completed.\n");
6650 return -E1000_ERR_RESET;
6651 }
6652 break;
6653 }
6501 6654
6502 return E1000_SUCCESS; 6655 return E1000_SUCCESS;
6503} 6656}
@@ -6569,8 +6722,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
6569 return; 6722 return;
6570 6723
6571 swsm = E1000_READ_REG(hw, SWSM); 6724 swsm = E1000_READ_REG(hw, SWSM);
6572 /* Release both semaphores. */ 6725 swsm &= ~(E1000_SWSM_SWESMBI);
6573 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
6574 E1000_WRITE_REG(hw, SWSM, swsm); 6726 E1000_WRITE_REG(hw, SWSM, swsm);
6575} 6727}
6576 6728
@@ -6606,6 +6758,8 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
6606 * if this is the case. We read FWSM to determine the manageability mode. 6758 * if this is the case. We read FWSM to determine the manageability mode.
6607 */ 6759 */
6608 switch (hw->mac_type) { 6760 switch (hw->mac_type) {
6761 case e1000_82571:
6762 case e1000_82572:
6609 case e1000_82573: 6763 case e1000_82573:
6610 fwsm = E1000_READ_REG(hw, FWSM); 6764 fwsm = E1000_READ_REG(hw, FWSM);
6611 if((fwsm & E1000_FWSM_MODE_MASK) != 0) 6765 if((fwsm & E1000_FWSM_MODE_MASK) != 0)
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 51c2b3a18b6f..4f2c196dc314 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -57,6 +57,8 @@ typedef enum {
57 e1000_82541_rev_2, 57 e1000_82541_rev_2,
58 e1000_82547, 58 e1000_82547,
59 e1000_82547_rev_2, 59 e1000_82547_rev_2,
60 e1000_82571,
61 e1000_82572,
60 e1000_82573, 62 e1000_82573,
61 e1000_num_macs 63 e1000_num_macs
62} e1000_mac_type; 64} e1000_mac_type;
@@ -478,10 +480,16 @@ uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
478#define E1000_DEV_ID_82546GB_SERDES 0x107B 480#define E1000_DEV_ID_82546GB_SERDES 0x107B
479#define E1000_DEV_ID_82546GB_PCIE 0x108A 481#define E1000_DEV_ID_82546GB_PCIE 0x108A
480#define E1000_DEV_ID_82547EI 0x1019 482#define E1000_DEV_ID_82547EI 0x1019
483#define E1000_DEV_ID_82571EB_COPPER 0x105E
484#define E1000_DEV_ID_82571EB_FIBER 0x105F
485#define E1000_DEV_ID_82571EB_SERDES 0x1060
486#define E1000_DEV_ID_82572EI_COPPER 0x107D
487#define E1000_DEV_ID_82572EI_FIBER 0x107E
488#define E1000_DEV_ID_82572EI_SERDES 0x107F
481#define E1000_DEV_ID_82573E 0x108B 489#define E1000_DEV_ID_82573E 0x108B
482#define E1000_DEV_ID_82573E_IAMT 0x108C 490#define E1000_DEV_ID_82573E_IAMT 0x108C
491#define E1000_DEV_ID_82573L 0x109A
483 492
484#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
485 493
486#define NODE_ADDRESS_SIZE 6 494#define NODE_ADDRESS_SIZE 6
487#define ETH_LENGTH_OF_ADDRESS 6 495#define ETH_LENGTH_OF_ADDRESS 6
@@ -833,6 +841,8 @@ struct e1000_ffvt_entry {
833#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX 841#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
834#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX 842#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
835 843
844#define E1000_DISABLE_SERDES_LOOPBACK 0x0400
845
836/* Register Set. (82543, 82544) 846/* Register Set. (82543, 82544)
837 * 847 *
838 * Registers are defined to be 32 bits and should be accessed as 32 bit values. 848 * Registers are defined to be 32 bits and should be accessed as 32 bit values.
@@ -853,6 +863,7 @@ struct e1000_ffvt_entry {
853#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 863#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
854#define E1000_FLA 0x0001C /* Flash Access - RW */ 864#define E1000_FLA 0x0001C /* Flash Access - RW */
855#define E1000_MDIC 0x00020 /* MDI Control - RW */ 865#define E1000_MDIC 0x00020 /* MDI Control - RW */
866#define E1000_SCTL 0x00024 /* SerDes Control - RW */
856#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 867#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
857#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ 868#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
858#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 869#define E1000_FCT 0x00030 /* Flow Control Type - RW */
@@ -864,6 +875,12 @@ struct e1000_ffvt_entry {
864#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 875#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
865#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 876#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
866#define E1000_RCTL 0x00100 /* RX Control - RW */ 877#define E1000_RCTL 0x00100 /* RX Control - RW */
878#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
879#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
880#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */
881#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */
882#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */
883#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */
867#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ 884#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */
868#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ 885#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */
869#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ 886#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */
@@ -895,6 +912,12 @@ struct e1000_ffvt_entry {
895#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ 912#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
896#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ 913#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
897#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ 914#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
915#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
916#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
917#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
918#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
919#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
920#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
898#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */ 921#define E1000_RXDCTL 0x02828 /* RX Descriptor Control - RW */
899#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ 922#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
900#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ 923#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
@@ -980,15 +1003,15 @@ struct e1000_ffvt_entry {
980#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ 1003#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
981#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ 1004#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
982#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ 1005#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
983#define E1000_IAC 0x4100 /* Interrupt Assertion Count */ 1006#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
984#define E1000_ICRXPTC 0x4104 /* Interrupt Cause Rx Packet Timer Expire Count */ 1007#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
985#define E1000_ICRXATC 0x4108 /* Interrupt Cause Rx Absolute Timer Expire Count */ 1008#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
986#define E1000_ICTXPTC 0x410C /* Interrupt Cause Tx Packet Timer Expire Count */ 1009#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
987#define E1000_ICTXATC 0x4110 /* Interrupt Cause Tx Absolute Timer Expire Count */ 1010#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
988#define E1000_ICTXQEC 0x4118 /* Interrupt Cause Tx Queue Empty Count */ 1011#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */
989#define E1000_ICTXQMTC 0x411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ 1012#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */
990#define E1000_ICRXDMTC 0x4120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ 1013#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
991#define E1000_ICRXOC 0x4124 /* Interrupt Cause Receiver Overrun Count */ 1014#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */
992#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ 1015#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */
993#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ 1016#define E1000_RFCTL 0x05008 /* Receive Filter Control*/
994#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 1017#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
@@ -1018,6 +1041,14 @@ struct e1000_ffvt_entry {
1018#define E1000_FWSM 0x05B54 /* FW Semaphore */ 1041#define E1000_FWSM 0x05B54 /* FW Semaphore */
1019#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ 1042#define E1000_FFLT_DBG 0x05F04 /* Debug Register */
1020#define E1000_HICR 0x08F00 /* Host Inteface Control */ 1043#define E1000_HICR 0x08F00 /* Host Inteface Control */
1044
1045/* RSS registers */
1046#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */
1047#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */
1048#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */
1049#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */
1050#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */
1051#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */
1021/* Register Set (82542) 1052/* Register Set (82542)
1022 * 1053 *
1023 * Some of the 82542 registers are located at different offsets than they are 1054 * Some of the 82542 registers are located at different offsets than they are
@@ -1032,6 +1063,7 @@ struct e1000_ffvt_entry {
1032#define E1000_82542_CTRL_EXT E1000_CTRL_EXT 1063#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
1033#define E1000_82542_FLA E1000_FLA 1064#define E1000_82542_FLA E1000_FLA
1034#define E1000_82542_MDIC E1000_MDIC 1065#define E1000_82542_MDIC E1000_MDIC
1066#define E1000_82542_SCTL E1000_SCTL
1035#define E1000_82542_FCAL E1000_FCAL 1067#define E1000_82542_FCAL E1000_FCAL
1036#define E1000_82542_FCAH E1000_FCAH 1068#define E1000_82542_FCAH E1000_FCAH
1037#define E1000_82542_FCT E1000_FCT 1069#define E1000_82542_FCT E1000_FCT
@@ -1049,6 +1081,18 @@ struct e1000_ffvt_entry {
1049#define E1000_82542_RDLEN 0x00118 1081#define E1000_82542_RDLEN 0x00118
1050#define E1000_82542_RDH 0x00120 1082#define E1000_82542_RDH 0x00120
1051#define E1000_82542_RDT 0x00128 1083#define E1000_82542_RDT 0x00128
1084#define E1000_82542_RDTR0 E1000_82542_RDTR
1085#define E1000_82542_RDBAL0 E1000_82542_RDBAL
1086#define E1000_82542_RDBAH0 E1000_82542_RDBAH
1087#define E1000_82542_RDLEN0 E1000_82542_RDLEN
1088#define E1000_82542_RDH0 E1000_82542_RDH
1089#define E1000_82542_RDT0 E1000_82542_RDT
1090#define E1000_82542_RDTR1 0x00130
1091#define E1000_82542_RDBAL1 0x00138
1092#define E1000_82542_RDBAH1 0x0013C
1093#define E1000_82542_RDLEN1 0x00140
1094#define E1000_82542_RDH1 0x00148
1095#define E1000_82542_RDT1 0x00150
1052#define E1000_82542_FCRTH 0x00160 1096#define E1000_82542_FCRTH 0x00160
1053#define E1000_82542_FCRTL 0x00168 1097#define E1000_82542_FCRTL 0x00168
1054#define E1000_82542_FCTTV E1000_FCTTV 1098#define E1000_82542_FCTTV E1000_FCTTV
@@ -1197,6 +1241,13 @@ struct e1000_ffvt_entry {
1197#define E1000_82542_ICRXOC E1000_ICRXOC 1241#define E1000_82542_ICRXOC E1000_ICRXOC
1198#define E1000_82542_HICR E1000_HICR 1242#define E1000_82542_HICR E1000_HICR
1199 1243
1244#define E1000_82542_CPUVEC E1000_CPUVEC
1245#define E1000_82542_MRQC E1000_MRQC
1246#define E1000_82542_RETA E1000_RETA
1247#define E1000_82542_RSSRK E1000_RSSRK
1248#define E1000_82542_RSSIM E1000_RSSIM
1249#define E1000_82542_RSSIR E1000_RSSIR
1250
1200/* Statistics counters collected by the MAC */ 1251/* Statistics counters collected by the MAC */
1201struct e1000_hw_stats { 1252struct e1000_hw_stats {
1202 uint64_t crcerrs; 1253 uint64_t crcerrs;
@@ -1336,6 +1387,7 @@ struct e1000_hw {
1336 boolean_t serdes_link_down; 1387 boolean_t serdes_link_down;
1337 boolean_t tbi_compatibility_en; 1388 boolean_t tbi_compatibility_en;
1338 boolean_t tbi_compatibility_on; 1389 boolean_t tbi_compatibility_on;
1390 boolean_t laa_is_present;
1339 boolean_t phy_reset_disable; 1391 boolean_t phy_reset_disable;
1340 boolean_t fc_send_xon; 1392 boolean_t fc_send_xon;
1341 boolean_t fc_strict_ieee; 1393 boolean_t fc_strict_ieee;
@@ -1374,6 +1426,7 @@ struct e1000_hw {
1374#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ 1426#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */
1375#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 1427#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
1376#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 1428#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
1429#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */
1377#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ 1430#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
1378#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 1431#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
1379#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 1432#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
@@ -1491,6 +1544,8 @@ struct e1000_hw {
1491#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 1544#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000
1492#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 1545#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000
1493#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 1546#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000
1547#define E1000_CTRL_EXT_CANC 0x04000000 /* Interrupt delay cancellation */
1548#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
1494#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 1549#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */
1495#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ 1550#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */
1496 1551
@@ -1524,6 +1579,7 @@ struct e1000_hw {
1524#define E1000_LEDCTL_LED2_BLINK 0x00800000 1579#define E1000_LEDCTL_LED2_BLINK 0x00800000
1525#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 1580#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000
1526#define E1000_LEDCTL_LED3_MODE_SHIFT 24 1581#define E1000_LEDCTL_LED3_MODE_SHIFT 24
1582#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000
1527#define E1000_LEDCTL_LED3_IVRT 0x40000000 1583#define E1000_LEDCTL_LED3_IVRT 0x40000000
1528#define E1000_LEDCTL_LED3_BLINK 0x80000000 1584#define E1000_LEDCTL_LED3_BLINK 0x80000000
1529 1585
@@ -1784,6 +1840,16 @@ struct e1000_hw {
1784#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ 1840#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */
1785#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ 1841#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
1786 1842
1843/* Multiple Receive Queue Control */
1844#define E1000_MRQC_ENABLE_MASK 0x00000003
1845#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001
1846#define E1000_MRQC_ENABLE_RSS_INT 0x00000004
1847#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000
1848#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
1849#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
1850#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00040000
1851#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000
1852#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000
1787 1853
1788/* Definitions for power management and wakeup registers */ 1854/* Definitions for power management and wakeup registers */
1789/* Wake Up Control */ 1855/* Wake Up Control */
@@ -1928,6 +1994,7 @@ struct e1000_host_command_info {
1928#define E1000_MDALIGN 4096 1994#define E1000_MDALIGN 4096
1929 1995
1930#define E1000_GCR_BEM32 0x00400000 1996#define E1000_GCR_BEM32 0x00400000
1997#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
1931/* Function Active and Power State to MNG */ 1998/* Function Active and Power State to MNG */
1932#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 1999#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003
1933#define E1000_FACTPS_LAN0_VALID 0x00000004 2000#define E1000_FACTPS_LAN0_VALID 0x00000004
@@ -1980,6 +2047,7 @@ struct e1000_host_command_info {
1980/* EEPROM Word Offsets */ 2047/* EEPROM Word Offsets */
1981#define EEPROM_COMPAT 0x0003 2048#define EEPROM_COMPAT 0x0003
1982#define EEPROM_ID_LED_SETTINGS 0x0004 2049#define EEPROM_ID_LED_SETTINGS 0x0004
2050#define EEPROM_VERSION 0x0005
1983#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ 2051#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */
1984#define EEPROM_PHY_CLASS_WORD 0x0007 2052#define EEPROM_PHY_CLASS_WORD 0x0007
1985#define EEPROM_INIT_CONTROL1_REG 0x000A 2053#define EEPROM_INIT_CONTROL1_REG 0x000A
@@ -1990,6 +2058,8 @@ struct e1000_host_command_info {
1990#define EEPROM_FLASH_VERSION 0x0032 2058#define EEPROM_FLASH_VERSION 0x0032
1991#define EEPROM_CHECKSUM_REG 0x003F 2059#define EEPROM_CHECKSUM_REG 0x003F
1992 2060
2061#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */
2062
1993/* Word definitions for ID LED Settings */ 2063/* Word definitions for ID LED Settings */
1994#define ID_LED_RESERVED_0000 0x0000 2064#define ID_LED_RESERVED_0000 0x0000
1995#define ID_LED_RESERVED_FFFF 0xFFFF 2065#define ID_LED_RESERVED_FFFF 0xFFFF
@@ -2108,6 +2178,8 @@ struct e1000_host_command_info {
2108#define E1000_PBA_22K 0x0016 2178#define E1000_PBA_22K 0x0016
2109#define E1000_PBA_24K 0x0018 2179#define E1000_PBA_24K 0x0018
2110#define E1000_PBA_30K 0x001E 2180#define E1000_PBA_30K 0x001E
2181#define E1000_PBA_32K 0x0020
2182#define E1000_PBA_38K 0x0026
2111#define E1000_PBA_40K 0x0028 2183#define E1000_PBA_40K 0x0028
2112#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ 2184#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */
2113 2185
@@ -2592,11 +2664,11 @@ struct e1000_host_command_info {
2592 2664
2593/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ 2665/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */
2594#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 2666#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
2595#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 128 2667#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
2596 2668
2597/* The precision error of the cable length is +/- 10 meters */ 2669/* The precision error of the cable length is +/- 10 meters */
2598#define IGP01E1000_AGC_RANGE 10 2670#define IGP01E1000_AGC_RANGE 10
2599#define IGP02E1000_AGC_RANGE 10 2671#define IGP02E1000_AGC_RANGE 15
2600 2672
2601/* IGP01E1000 PCS Initialization register */ 2673/* IGP01E1000 PCS Initialization register */
2602/* bits 3:6 in the PCS registers stores the channels polarity */ 2674/* bits 3:6 in the PCS registers stores the channels polarity */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ee687c902a20..efbbda7cbcbf 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -43,7 +43,7 @@ char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
43#else 43#else
44#define DRIVERNAPI "-NAPI" 44#define DRIVERNAPI "-NAPI"
45#endif 45#endif
46#define DRV_VERSION "6.0.60-k2"DRIVERNAPI 46#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
47char e1000_driver_version[] = DRV_VERSION; 47char e1000_driver_version[] = DRV_VERSION;
48char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation."; 48char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
49 49
@@ -80,6 +80,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
80 INTEL_E1000_ETHERNET_DEVICE(0x1026), 80 INTEL_E1000_ETHERNET_DEVICE(0x1026),
81 INTEL_E1000_ETHERNET_DEVICE(0x1027), 81 INTEL_E1000_ETHERNET_DEVICE(0x1027),
82 INTEL_E1000_ETHERNET_DEVICE(0x1028), 82 INTEL_E1000_ETHERNET_DEVICE(0x1028),
83 INTEL_E1000_ETHERNET_DEVICE(0x105E),
84 INTEL_E1000_ETHERNET_DEVICE(0x105F),
85 INTEL_E1000_ETHERNET_DEVICE(0x1060),
83 INTEL_E1000_ETHERNET_DEVICE(0x1075), 86 INTEL_E1000_ETHERNET_DEVICE(0x1075),
84 INTEL_E1000_ETHERNET_DEVICE(0x1076), 87 INTEL_E1000_ETHERNET_DEVICE(0x1076),
85 INTEL_E1000_ETHERNET_DEVICE(0x1077), 88 INTEL_E1000_ETHERNET_DEVICE(0x1077),
@@ -88,10 +91,13 @@ static struct pci_device_id e1000_pci_tbl[] = {
88 INTEL_E1000_ETHERNET_DEVICE(0x107A), 91 INTEL_E1000_ETHERNET_DEVICE(0x107A),
89 INTEL_E1000_ETHERNET_DEVICE(0x107B), 92 INTEL_E1000_ETHERNET_DEVICE(0x107B),
90 INTEL_E1000_ETHERNET_DEVICE(0x107C), 93 INTEL_E1000_ETHERNET_DEVICE(0x107C),
94 INTEL_E1000_ETHERNET_DEVICE(0x107D),
95 INTEL_E1000_ETHERNET_DEVICE(0x107E),
96 INTEL_E1000_ETHERNET_DEVICE(0x107F),
91 INTEL_E1000_ETHERNET_DEVICE(0x108A), 97 INTEL_E1000_ETHERNET_DEVICE(0x108A),
92 INTEL_E1000_ETHERNET_DEVICE(0x108B), 98 INTEL_E1000_ETHERNET_DEVICE(0x108B),
93 INTEL_E1000_ETHERNET_DEVICE(0x108C), 99 INTEL_E1000_ETHERNET_DEVICE(0x108C),
94 INTEL_E1000_ETHERNET_DEVICE(0x1099), 100 INTEL_E1000_ETHERNET_DEVICE(0x109A),
95 /* required last entry */ 101 /* required last entry */
96 {0,} 102 {0,}
97}; 103};
@@ -102,10 +108,18 @@ int e1000_up(struct e1000_adapter *adapter);
102void e1000_down(struct e1000_adapter *adapter); 108void e1000_down(struct e1000_adapter *adapter);
103void e1000_reset(struct e1000_adapter *adapter); 109void e1000_reset(struct e1000_adapter *adapter);
104int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); 110int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
105int e1000_setup_tx_resources(struct e1000_adapter *adapter); 111int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
106int e1000_setup_rx_resources(struct e1000_adapter *adapter); 112int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
107void e1000_free_tx_resources(struct e1000_adapter *adapter); 113void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
108void e1000_free_rx_resources(struct e1000_adapter *adapter); 114void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
115int e1000_setup_tx_resources(struct e1000_adapter *adapter,
116 struct e1000_tx_ring *txdr);
117int e1000_setup_rx_resources(struct e1000_adapter *adapter,
118 struct e1000_rx_ring *rxdr);
119void e1000_free_tx_resources(struct e1000_adapter *adapter,
120 struct e1000_tx_ring *tx_ring);
121void e1000_free_rx_resources(struct e1000_adapter *adapter,
122 struct e1000_rx_ring *rx_ring);
109void e1000_update_stats(struct e1000_adapter *adapter); 123void e1000_update_stats(struct e1000_adapter *adapter);
110 124
111/* Local Function Prototypes */ 125/* Local Function Prototypes */
@@ -114,14 +128,22 @@ static int e1000_init_module(void);
114static void e1000_exit_module(void); 128static void e1000_exit_module(void);
115static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 129static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
116static void __devexit e1000_remove(struct pci_dev *pdev); 130static void __devexit e1000_remove(struct pci_dev *pdev);
131static int e1000_alloc_queues(struct e1000_adapter *adapter);
132#ifdef CONFIG_E1000_MQ
133static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
134#endif
117static int e1000_sw_init(struct e1000_adapter *adapter); 135static int e1000_sw_init(struct e1000_adapter *adapter);
118static int e1000_open(struct net_device *netdev); 136static int e1000_open(struct net_device *netdev);
119static int e1000_close(struct net_device *netdev); 137static int e1000_close(struct net_device *netdev);
120static void e1000_configure_tx(struct e1000_adapter *adapter); 138static void e1000_configure_tx(struct e1000_adapter *adapter);
121static void e1000_configure_rx(struct e1000_adapter *adapter); 139static void e1000_configure_rx(struct e1000_adapter *adapter);
122static void e1000_setup_rctl(struct e1000_adapter *adapter); 140static void e1000_setup_rctl(struct e1000_adapter *adapter);
123static void e1000_clean_tx_ring(struct e1000_adapter *adapter); 141static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
124static void e1000_clean_rx_ring(struct e1000_adapter *adapter); 142static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
143static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
144 struct e1000_tx_ring *tx_ring);
145static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
146 struct e1000_rx_ring *rx_ring);
125static void e1000_set_multi(struct net_device *netdev); 147static void e1000_set_multi(struct net_device *netdev);
126static void e1000_update_phy_info(unsigned long data); 148static void e1000_update_phy_info(unsigned long data);
127static void e1000_watchdog(unsigned long data); 149static void e1000_watchdog(unsigned long data);
@@ -132,19 +154,26 @@ static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
132static int e1000_change_mtu(struct net_device *netdev, int new_mtu); 154static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
133static int e1000_set_mac(struct net_device *netdev, void *p); 155static int e1000_set_mac(struct net_device *netdev, void *p);
134static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs); 156static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
135static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter); 157static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
158 struct e1000_tx_ring *tx_ring);
136#ifdef CONFIG_E1000_NAPI 159#ifdef CONFIG_E1000_NAPI
137static int e1000_clean(struct net_device *netdev, int *budget); 160static int e1000_clean(struct net_device *poll_dev, int *budget);
138static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 161static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
162 struct e1000_rx_ring *rx_ring,
139 int *work_done, int work_to_do); 163 int *work_done, int work_to_do);
140static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 164static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
165 struct e1000_rx_ring *rx_ring,
141 int *work_done, int work_to_do); 166 int *work_done, int work_to_do);
142#else 167#else
143static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter); 168static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
144static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter); 169 struct e1000_rx_ring *rx_ring);
170static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
171 struct e1000_rx_ring *rx_ring);
145#endif 172#endif
146static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter); 173static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
147static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter); 174 struct e1000_rx_ring *rx_ring);
175static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
176 struct e1000_rx_ring *rx_ring);
148static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 177static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
149static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, 178static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
150 int cmd); 179 int cmd);
@@ -162,8 +191,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
162static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 191static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
163static void e1000_restore_vlan(struct e1000_adapter *adapter); 192static void e1000_restore_vlan(struct e1000_adapter *adapter);
164 193
165static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
166#ifdef CONFIG_PM 194#ifdef CONFIG_PM
195static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
167static int e1000_resume(struct pci_dev *pdev); 196static int e1000_resume(struct pci_dev *pdev);
168#endif 197#endif
169 198
@@ -172,6 +201,11 @@ static int e1000_resume(struct pci_dev *pdev);
172static void e1000_netpoll (struct net_device *netdev); 201static void e1000_netpoll (struct net_device *netdev);
173#endif 202#endif
174 203
204#ifdef CONFIG_E1000_MQ
205/* for multiple Rx queues */
206void e1000_rx_schedule(void *data);
207#endif
208
175/* Exported from other modules */ 209/* Exported from other modules */
176 210
177extern void e1000_check_options(struct e1000_adapter *adapter); 211extern void e1000_check_options(struct e1000_adapter *adapter);
@@ -289,7 +323,7 @@ int
289e1000_up(struct e1000_adapter *adapter) 323e1000_up(struct e1000_adapter *adapter)
290{ 324{
291 struct net_device *netdev = adapter->netdev; 325 struct net_device *netdev = adapter->netdev;
292 int err; 326 int i, err;
293 327
294 /* hardware has been reset, we need to reload some things */ 328 /* hardware has been reset, we need to reload some things */
295 329
@@ -308,7 +342,8 @@ e1000_up(struct e1000_adapter *adapter)
308 e1000_configure_tx(adapter); 342 e1000_configure_tx(adapter);
309 e1000_setup_rctl(adapter); 343 e1000_setup_rctl(adapter);
310 e1000_configure_rx(adapter); 344 e1000_configure_rx(adapter);
311 adapter->alloc_rx_buf(adapter); 345 for (i = 0; i < adapter->num_queues; i++)
346 adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]);
312 347
313#ifdef CONFIG_PCI_MSI 348#ifdef CONFIG_PCI_MSI
314 if(adapter->hw.mac_type > e1000_82547_rev_2) { 349 if(adapter->hw.mac_type > e1000_82547_rev_2) {
@@ -344,6 +379,9 @@ e1000_down(struct e1000_adapter *adapter)
344 struct net_device *netdev = adapter->netdev; 379 struct net_device *netdev = adapter->netdev;
345 380
346 e1000_irq_disable(adapter); 381 e1000_irq_disable(adapter);
382#ifdef CONFIG_E1000_MQ
383 while (atomic_read(&adapter->rx_sched_call_data.count) != 0);
384#endif
347 free_irq(adapter->pdev->irq, netdev); 385 free_irq(adapter->pdev->irq, netdev);
348#ifdef CONFIG_PCI_MSI 386#ifdef CONFIG_PCI_MSI
349 if(adapter->hw.mac_type > e1000_82547_rev_2 && 387 if(adapter->hw.mac_type > e1000_82547_rev_2 &&
@@ -363,11 +401,10 @@ e1000_down(struct e1000_adapter *adapter)
363 netif_stop_queue(netdev); 401 netif_stop_queue(netdev);
364 402
365 e1000_reset(adapter); 403 e1000_reset(adapter);
366 e1000_clean_tx_ring(adapter); 404 e1000_clean_all_tx_rings(adapter);
367 e1000_clean_rx_ring(adapter); 405 e1000_clean_all_rx_rings(adapter);
368 406
369 /* If WoL is not enabled 407 /* If WoL is not enabled and management mode is not IAMT
370 * and management mode is not IAMT
371 * Power down the PHY so no link is implied when interface is down */ 408 * Power down the PHY so no link is implied when interface is down */
372 if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 409 if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
373 adapter->hw.media_type == e1000_media_type_copper && 410 adapter->hw.media_type == e1000_media_type_copper &&
@@ -398,6 +435,10 @@ e1000_reset(struct e1000_adapter *adapter)
398 case e1000_82547_rev_2: 435 case e1000_82547_rev_2:
399 pba = E1000_PBA_30K; 436 pba = E1000_PBA_30K;
400 break; 437 break;
438 case e1000_82571:
439 case e1000_82572:
440 pba = E1000_PBA_38K;
441 break;
401 case e1000_82573: 442 case e1000_82573:
402 pba = E1000_PBA_12K; 443 pba = E1000_PBA_12K;
403 break; 444 break;
@@ -475,6 +516,7 @@ e1000_probe(struct pci_dev *pdev,
475 struct net_device *netdev; 516 struct net_device *netdev;
476 struct e1000_adapter *adapter; 517 struct e1000_adapter *adapter;
477 unsigned long mmio_start, mmio_len; 518 unsigned long mmio_start, mmio_len;
519 uint32_t ctrl_ext;
478 uint32_t swsm; 520 uint32_t swsm;
479 521
480 static int cards_found = 0; 522 static int cards_found = 0;
@@ -614,8 +656,9 @@ e1000_probe(struct pci_dev *pdev,
614 if(e1000_read_mac_addr(&adapter->hw)) 656 if(e1000_read_mac_addr(&adapter->hw))
615 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 657 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
616 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); 658 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
659 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
617 660
618 if(!is_valid_ether_addr(netdev->dev_addr)) { 661 if(!is_valid_ether_addr(netdev->perm_addr)) {
619 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 662 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
620 err = -EIO; 663 err = -EIO;
621 goto err_eeprom; 664 goto err_eeprom;
@@ -687,6 +730,12 @@ e1000_probe(struct pci_dev *pdev,
687 730
688 /* Let firmware know the driver has taken over */ 731 /* Let firmware know the driver has taken over */
689 switch(adapter->hw.mac_type) { 732 switch(adapter->hw.mac_type) {
733 case e1000_82571:
734 case e1000_82572:
735 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
736 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
737 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
738 break;
690 case e1000_82573: 739 case e1000_82573:
691 swsm = E1000_READ_REG(&adapter->hw, SWSM); 740 swsm = E1000_READ_REG(&adapter->hw, SWSM);
692 E1000_WRITE_REG(&adapter->hw, SWSM, 741 E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -731,7 +780,11 @@ e1000_remove(struct pci_dev *pdev)
731{ 780{
732 struct net_device *netdev = pci_get_drvdata(pdev); 781 struct net_device *netdev = pci_get_drvdata(pdev);
733 struct e1000_adapter *adapter = netdev_priv(netdev); 782 struct e1000_adapter *adapter = netdev_priv(netdev);
783 uint32_t ctrl_ext;
734 uint32_t manc, swsm; 784 uint32_t manc, swsm;
785#ifdef CONFIG_E1000_NAPI
786 int i;
787#endif
735 788
736 flush_scheduled_work(); 789 flush_scheduled_work();
737 790
@@ -745,6 +798,12 @@ e1000_remove(struct pci_dev *pdev)
745 } 798 }
746 799
747 switch(adapter->hw.mac_type) { 800 switch(adapter->hw.mac_type) {
801 case e1000_82571:
802 case e1000_82572:
803 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
804 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
805 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
806 break;
748 case e1000_82573: 807 case e1000_82573:
749 swsm = E1000_READ_REG(&adapter->hw, SWSM); 808 swsm = E1000_READ_REG(&adapter->hw, SWSM);
750 E1000_WRITE_REG(&adapter->hw, SWSM, 809 E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -756,13 +815,27 @@ e1000_remove(struct pci_dev *pdev)
756 } 815 }
757 816
758 unregister_netdev(netdev); 817 unregister_netdev(netdev);
818#ifdef CONFIG_E1000_NAPI
819 for (i = 0; i < adapter->num_queues; i++)
820 __dev_put(&adapter->polling_netdev[i]);
821#endif
759 822
760 if(!e1000_check_phy_reset_block(&adapter->hw)) 823 if(!e1000_check_phy_reset_block(&adapter->hw))
761 e1000_phy_hw_reset(&adapter->hw); 824 e1000_phy_hw_reset(&adapter->hw);
762 825
826 kfree(adapter->tx_ring);
827 kfree(adapter->rx_ring);
828#ifdef CONFIG_E1000_NAPI
829 kfree(adapter->polling_netdev);
830#endif
831
763 iounmap(adapter->hw.hw_addr); 832 iounmap(adapter->hw.hw_addr);
764 pci_release_regions(pdev); 833 pci_release_regions(pdev);
765 834
835#ifdef CONFIG_E1000_MQ
836 free_percpu(adapter->cpu_netdev);
837 free_percpu(adapter->cpu_tx_ring);
838#endif
766 free_netdev(netdev); 839 free_netdev(netdev);
767 840
768 pci_disable_device(pdev); 841 pci_disable_device(pdev);
@@ -783,6 +856,9 @@ e1000_sw_init(struct e1000_adapter *adapter)
783 struct e1000_hw *hw = &adapter->hw; 856 struct e1000_hw *hw = &adapter->hw;
784 struct net_device *netdev = adapter->netdev; 857 struct net_device *netdev = adapter->netdev;
785 struct pci_dev *pdev = adapter->pdev; 858 struct pci_dev *pdev = adapter->pdev;
859#ifdef CONFIG_E1000_NAPI
860 int i;
861#endif
786 862
787 /* PCI config space info */ 863 /* PCI config space info */
788 864
@@ -840,14 +916,123 @@ e1000_sw_init(struct e1000_adapter *adapter)
840 hw->master_slave = E1000_MASTER_SLAVE; 916 hw->master_slave = E1000_MASTER_SLAVE;
841 } 917 }
842 918
919#ifdef CONFIG_E1000_MQ
920 /* Number of supported queues */
921 switch (hw->mac_type) {
922 case e1000_82571:
923 case e1000_82572:
924 adapter->num_queues = 2;
925 break;
926 default:
927 adapter->num_queues = 1;
928 break;
929 }
930 adapter->num_queues = min(adapter->num_queues, num_online_cpus());
931#else
932 adapter->num_queues = 1;
933#endif
934
935 if (e1000_alloc_queues(adapter)) {
936 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
937 return -ENOMEM;
938 }
939
940#ifdef CONFIG_E1000_NAPI
941 for (i = 0; i < adapter->num_queues; i++) {
942 adapter->polling_netdev[i].priv = adapter;
943 adapter->polling_netdev[i].poll = &e1000_clean;
944 adapter->polling_netdev[i].weight = 64;
945 dev_hold(&adapter->polling_netdev[i]);
946 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
947 }
948#endif
949
950#ifdef CONFIG_E1000_MQ
951 e1000_setup_queue_mapping(adapter);
952#endif
953
843 atomic_set(&adapter->irq_sem, 1); 954 atomic_set(&adapter->irq_sem, 1);
844 spin_lock_init(&adapter->stats_lock); 955 spin_lock_init(&adapter->stats_lock);
845 spin_lock_init(&adapter->tx_lock);
846 956
847 return 0; 957 return 0;
848} 958}
849 959
850/** 960/**
961 * e1000_alloc_queues - Allocate memory for all rings
962 * @adapter: board private structure to initialize
963 *
964 * We allocate one ring per queue at run-time since we don't know the
965 * number of queues at compile-time. The polling_netdev array is
966 * intended for Multiqueue, but should work fine with a single queue.
967 **/
968
969static int __devinit
970e1000_alloc_queues(struct e1000_adapter *adapter)
971{
972 int size;
973
974 size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
975 adapter->tx_ring = kmalloc(size, GFP_KERNEL);
976 if (!adapter->tx_ring)
977 return -ENOMEM;
978 memset(adapter->tx_ring, 0, size);
979
980 size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
981 adapter->rx_ring = kmalloc(size, GFP_KERNEL);
982 if (!adapter->rx_ring) {
983 kfree(adapter->tx_ring);
984 return -ENOMEM;
985 }
986 memset(adapter->rx_ring, 0, size);
987
988#ifdef CONFIG_E1000_NAPI
989 size = sizeof(struct net_device) * adapter->num_queues;
990 adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
991 if (!adapter->polling_netdev) {
992 kfree(adapter->tx_ring);
993 kfree(adapter->rx_ring);
994 return -ENOMEM;
995 }
996 memset(adapter->polling_netdev, 0, size);
997#endif
998
999 return E1000_SUCCESS;
1000}
1001
1002#ifdef CONFIG_E1000_MQ
1003static void __devinit
1004e1000_setup_queue_mapping(struct e1000_adapter *adapter)
1005{
1006 int i, cpu;
1007
1008 adapter->rx_sched_call_data.func = e1000_rx_schedule;
1009 adapter->rx_sched_call_data.info = adapter->netdev;
1010 cpus_clear(adapter->rx_sched_call_data.cpumask);
1011
1012 adapter->cpu_netdev = alloc_percpu(struct net_device *);
1013 adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
1014
1015 lock_cpu_hotplug();
1016 i = 0;
1017 for_each_online_cpu(cpu) {
1018 *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues];
1019 /* This is incomplete because we'd like to assign separate
1020 * physical cpus to these netdev polling structures and
1021 * avoid saturating a subset of cpus.
1022 */
1023 if (i < adapter->num_queues) {
1024 *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
1025 adapter->cpu_for_queue[i] = cpu;
1026 } else
1027 *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
1028
1029 i++;
1030 }
1031 unlock_cpu_hotplug();
1032}
1033#endif
1034
1035/**
851 * e1000_open - Called when a network interface is made active 1036 * e1000_open - Called when a network interface is made active
852 * @netdev: network interface device structure 1037 * @netdev: network interface device structure
853 * 1038 *
@@ -868,12 +1053,12 @@ e1000_open(struct net_device *netdev)
868 1053
869 /* allocate transmit descriptors */ 1054 /* allocate transmit descriptors */
870 1055
871 if((err = e1000_setup_tx_resources(adapter))) 1056 if ((err = e1000_setup_all_tx_resources(adapter)))
872 goto err_setup_tx; 1057 goto err_setup_tx;
873 1058
874 /* allocate receive descriptors */ 1059 /* allocate receive descriptors */
875 1060
876 if((err = e1000_setup_rx_resources(adapter))) 1061 if ((err = e1000_setup_all_rx_resources(adapter)))
877 goto err_setup_rx; 1062 goto err_setup_rx;
878 1063
879 if((err = e1000_up(adapter))) 1064 if((err = e1000_up(adapter)))
@@ -887,9 +1072,9 @@ e1000_open(struct net_device *netdev)
887 return E1000_SUCCESS; 1072 return E1000_SUCCESS;
888 1073
889err_up: 1074err_up:
890 e1000_free_rx_resources(adapter); 1075 e1000_free_all_rx_resources(adapter);
891err_setup_rx: 1076err_setup_rx:
892 e1000_free_tx_resources(adapter); 1077 e1000_free_all_tx_resources(adapter);
893err_setup_tx: 1078err_setup_tx:
894 e1000_reset(adapter); 1079 e1000_reset(adapter);
895 1080
@@ -915,8 +1100,8 @@ e1000_close(struct net_device *netdev)
915 1100
916 e1000_down(adapter); 1101 e1000_down(adapter);
917 1102
918 e1000_free_tx_resources(adapter); 1103 e1000_free_all_tx_resources(adapter);
919 e1000_free_rx_resources(adapter); 1104 e1000_free_all_rx_resources(adapter);
920 1105
921 if((adapter->hw.mng_cookie.status & 1106 if((adapter->hw.mng_cookie.status &
922 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { 1107 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
@@ -951,25 +1136,28 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
951/** 1136/**
952 * e1000_setup_tx_resources - allocate Tx resources (Descriptors) 1137 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
953 * @adapter: board private structure 1138 * @adapter: board private structure
1139 * @txdr: tx descriptor ring (for a specific queue) to setup
954 * 1140 *
955 * Return 0 on success, negative on failure 1141 * Return 0 on success, negative on failure
956 **/ 1142 **/
957 1143
958int 1144int
959e1000_setup_tx_resources(struct e1000_adapter *adapter) 1145e1000_setup_tx_resources(struct e1000_adapter *adapter,
1146 struct e1000_tx_ring *txdr)
960{ 1147{
961 struct e1000_desc_ring *txdr = &adapter->tx_ring;
962 struct pci_dev *pdev = adapter->pdev; 1148 struct pci_dev *pdev = adapter->pdev;
963 int size; 1149 int size;
964 1150
965 size = sizeof(struct e1000_buffer) * txdr->count; 1151 size = sizeof(struct e1000_buffer) * txdr->count;
966 txdr->buffer_info = vmalloc(size); 1152
1153 txdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
967 if(!txdr->buffer_info) { 1154 if(!txdr->buffer_info) {
968 DPRINTK(PROBE, ERR, 1155 DPRINTK(PROBE, ERR,
969 "Unable to allocate memory for the transmit descriptor ring\n"); 1156 "Unable to allocate memory for the transmit descriptor ring\n");
970 return -ENOMEM; 1157 return -ENOMEM;
971 } 1158 }
972 memset(txdr->buffer_info, 0, size); 1159 memset(txdr->buffer_info, 0, size);
1160 memset(&txdr->previous_buffer_info, 0, sizeof(struct e1000_buffer));
973 1161
974 /* round up to nearest 4K */ 1162 /* round up to nearest 4K */
975 1163
@@ -1018,11 +1206,41 @@ setup_tx_desc_die:
1018 1206
1019 txdr->next_to_use = 0; 1207 txdr->next_to_use = 0;
1020 txdr->next_to_clean = 0; 1208 txdr->next_to_clean = 0;
1209 spin_lock_init(&txdr->tx_lock);
1021 1210
1022 return 0; 1211 return 0;
1023} 1212}
1024 1213
1025/** 1214/**
1215 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1216 * (Descriptors) for all queues
1217 * @adapter: board private structure
1218 *
1219 * If this function returns with an error, then it's possible one or
1220 * more of the rings is populated (while the rest are not). It is the
1221 * callers duty to clean those orphaned rings.
1222 *
1223 * Return 0 on success, negative on failure
1224 **/
1225
1226int
1227e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1228{
1229 int i, err = 0;
1230
1231 for (i = 0; i < adapter->num_queues; i++) {
1232 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1233 if (err) {
1234 DPRINTK(PROBE, ERR,
1235 "Allocation for Tx Queue %u failed\n", i);
1236 break;
1237 }
1238 }
1239
1240 return err;
1241}
1242
1243/**
1026 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset 1244 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1027 * @adapter: board private structure 1245 * @adapter: board private structure
1028 * 1246 *
@@ -1032,23 +1250,43 @@ setup_tx_desc_die:
1032static void 1250static void
1033e1000_configure_tx(struct e1000_adapter *adapter) 1251e1000_configure_tx(struct e1000_adapter *adapter)
1034{ 1252{
1035 uint64_t tdba = adapter->tx_ring.dma; 1253 uint64_t tdba;
1036 uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc); 1254 struct e1000_hw *hw = &adapter->hw;
1037 uint32_t tctl, tipg; 1255 uint32_t tdlen, tctl, tipg, tarc;
1038
1039 E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
1040 E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1041
1042 E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
1043 1256
1044 /* Setup the HW Tx Head and Tail descriptor pointers */ 1257 /* Setup the HW Tx Head and Tail descriptor pointers */
1045 1258
1046 E1000_WRITE_REG(&adapter->hw, TDH, 0); 1259 switch (adapter->num_queues) {
1047 E1000_WRITE_REG(&adapter->hw, TDT, 0); 1260 case 2:
1261 tdba = adapter->tx_ring[1].dma;
1262 tdlen = adapter->tx_ring[1].count *
1263 sizeof(struct e1000_tx_desc);
1264 E1000_WRITE_REG(hw, TDBAL1, (tdba & 0x00000000ffffffffULL));
1265 E1000_WRITE_REG(hw, TDBAH1, (tdba >> 32));
1266 E1000_WRITE_REG(hw, TDLEN1, tdlen);
1267 E1000_WRITE_REG(hw, TDH1, 0);
1268 E1000_WRITE_REG(hw, TDT1, 0);
1269 adapter->tx_ring[1].tdh = E1000_TDH1;
1270 adapter->tx_ring[1].tdt = E1000_TDT1;
1271 /* Fall Through */
1272 case 1:
1273 default:
1274 tdba = adapter->tx_ring[0].dma;
1275 tdlen = adapter->tx_ring[0].count *
1276 sizeof(struct e1000_tx_desc);
1277 E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
1278 E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
1279 E1000_WRITE_REG(hw, TDLEN, tdlen);
1280 E1000_WRITE_REG(hw, TDH, 0);
1281 E1000_WRITE_REG(hw, TDT, 0);
1282 adapter->tx_ring[0].tdh = E1000_TDH;
1283 adapter->tx_ring[0].tdt = E1000_TDT;
1284 break;
1285 }
1048 1286
1049 /* Set the default values for the Tx Inter Packet Gap timer */ 1287 /* Set the default values for the Tx Inter Packet Gap timer */
1050 1288
1051 switch (adapter->hw.mac_type) { 1289 switch (hw->mac_type) {
1052 case e1000_82542_rev2_0: 1290 case e1000_82542_rev2_0:
1053 case e1000_82542_rev2_1: 1291 case e1000_82542_rev2_1:
1054 tipg = DEFAULT_82542_TIPG_IPGT; 1292 tipg = DEFAULT_82542_TIPG_IPGT;
@@ -1056,67 +1294,81 @@ e1000_configure_tx(struct e1000_adapter *adapter)
1056 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 1294 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1057 break; 1295 break;
1058 default: 1296 default:
1059 if(adapter->hw.media_type == e1000_media_type_fiber || 1297 if (hw->media_type == e1000_media_type_fiber ||
1060 adapter->hw.media_type == e1000_media_type_internal_serdes) 1298 hw->media_type == e1000_media_type_internal_serdes)
1061 tipg = DEFAULT_82543_TIPG_IPGT_FIBER; 1299 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1062 else 1300 else
1063 tipg = DEFAULT_82543_TIPG_IPGT_COPPER; 1301 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1064 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; 1302 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1065 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; 1303 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1066 } 1304 }
1067 E1000_WRITE_REG(&adapter->hw, TIPG, tipg); 1305 E1000_WRITE_REG(hw, TIPG, tipg);
1068 1306
1069 /* Set the Tx Interrupt Delay register */ 1307 /* Set the Tx Interrupt Delay register */
1070 1308
1071 E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay); 1309 E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
1072 if(adapter->hw.mac_type >= e1000_82540) 1310 if (hw->mac_type >= e1000_82540)
1073 E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay); 1311 E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
1074 1312
1075 /* Program the Transmit Control Register */ 1313 /* Program the Transmit Control Register */
1076 1314
1077 tctl = E1000_READ_REG(&adapter->hw, TCTL); 1315 tctl = E1000_READ_REG(hw, TCTL);
1078 1316
1079 tctl &= ~E1000_TCTL_CT; 1317 tctl &= ~E1000_TCTL_CT;
1080 tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | 1318 tctl |= E1000_TCTL_EN | E1000_TCTL_PSP | E1000_TCTL_RTLC |
1081 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1319 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1082 1320
1083 E1000_WRITE_REG(&adapter->hw, TCTL, tctl); 1321 E1000_WRITE_REG(hw, TCTL, tctl);
1322
1323 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1324 tarc = E1000_READ_REG(hw, TARC0);
1325 tarc |= ((1 << 25) | (1 << 21));
1326 E1000_WRITE_REG(hw, TARC0, tarc);
1327 tarc = E1000_READ_REG(hw, TARC1);
1328 tarc |= (1 << 25);
1329 if (tctl & E1000_TCTL_MULR)
1330 tarc &= ~(1 << 28);
1331 else
1332 tarc |= (1 << 28);
1333 E1000_WRITE_REG(hw, TARC1, tarc);
1334 }
1084 1335
1085 e1000_config_collision_dist(&adapter->hw); 1336 e1000_config_collision_dist(hw);
1086 1337
1087 /* Setup Transmit Descriptor Settings for eop descriptor */ 1338 /* Setup Transmit Descriptor Settings for eop descriptor */
1088 adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP | 1339 adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
1089 E1000_TXD_CMD_IFCS; 1340 E1000_TXD_CMD_IFCS;
1090 1341
1091 if(adapter->hw.mac_type < e1000_82543) 1342 if (hw->mac_type < e1000_82543)
1092 adapter->txd_cmd |= E1000_TXD_CMD_RPS; 1343 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1093 else 1344 else
1094 adapter->txd_cmd |= E1000_TXD_CMD_RS; 1345 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1095 1346
1096 /* Cache if we're 82544 running in PCI-X because we'll 1347 /* Cache if we're 82544 running in PCI-X because we'll
1097 * need this to apply a workaround later in the send path. */ 1348 * need this to apply a workaround later in the send path. */
1098 if(adapter->hw.mac_type == e1000_82544 && 1349 if (hw->mac_type == e1000_82544 &&
1099 adapter->hw.bus_type == e1000_bus_type_pcix) 1350 hw->bus_type == e1000_bus_type_pcix)
1100 adapter->pcix_82544 = 1; 1351 adapter->pcix_82544 = 1;
1101} 1352}
1102 1353
1103/** 1354/**
1104 * e1000_setup_rx_resources - allocate Rx resources (Descriptors) 1355 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1105 * @adapter: board private structure 1356 * @adapter: board private structure
1357 * @rxdr: rx descriptor ring (for a specific queue) to setup
1106 * 1358 *
1107 * Returns 0 on success, negative on failure 1359 * Returns 0 on success, negative on failure
1108 **/ 1360 **/
1109 1361
1110int 1362int
1111e1000_setup_rx_resources(struct e1000_adapter *adapter) 1363e1000_setup_rx_resources(struct e1000_adapter *adapter,
1364 struct e1000_rx_ring *rxdr)
1112{ 1365{
1113 struct e1000_desc_ring *rxdr = &adapter->rx_ring;
1114 struct pci_dev *pdev = adapter->pdev; 1366 struct pci_dev *pdev = adapter->pdev;
1115 int size, desc_len; 1367 int size, desc_len;
1116 1368
1117 size = sizeof(struct e1000_buffer) * rxdr->count; 1369 size = sizeof(struct e1000_buffer) * rxdr->count;
1118 rxdr->buffer_info = vmalloc(size); 1370 rxdr->buffer_info = vmalloc_node(size, pcibus_to_node(pdev->bus));
1119 if(!rxdr->buffer_info) { 1371 if (!rxdr->buffer_info) {
1120 DPRINTK(PROBE, ERR, 1372 DPRINTK(PROBE, ERR,
1121 "Unable to allocate memory for the receive descriptor ring\n"); 1373 "Unable to allocate memory for the receive descriptor ring\n");
1122 return -ENOMEM; 1374 return -ENOMEM;
@@ -1156,13 +1408,13 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter)
1156 1408
1157 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1409 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1158 1410
1159 if(!rxdr->desc) { 1411 if (!rxdr->desc) {
1412 DPRINTK(PROBE, ERR,
1413 "Unable to allocate memory for the receive descriptor ring\n");
1160setup_rx_desc_die: 1414setup_rx_desc_die:
1161 vfree(rxdr->buffer_info); 1415 vfree(rxdr->buffer_info);
1162 kfree(rxdr->ps_page); 1416 kfree(rxdr->ps_page);
1163 kfree(rxdr->ps_page_dma); 1417 kfree(rxdr->ps_page_dma);
1164 DPRINTK(PROBE, ERR,
1165 "Unable to allocate memory for the receive descriptor ring\n");
1166 return -ENOMEM; 1418 return -ENOMEM;
1167 } 1419 }
1168 1420
@@ -1174,9 +1426,12 @@ setup_rx_desc_die:
1174 "at %p\n", rxdr->size, rxdr->desc); 1426 "at %p\n", rxdr->size, rxdr->desc);
1175 /* Try again, without freeing the previous */ 1427 /* Try again, without freeing the previous */
1176 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1428 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1177 if(!rxdr->desc) {
1178 /* Failed allocation, critical failure */ 1429 /* Failed allocation, critical failure */
1430 if (!rxdr->desc) {
1179 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1431 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1432 DPRINTK(PROBE, ERR,
1433 "Unable to allocate memory "
1434 "for the receive descriptor ring\n");
1180 goto setup_rx_desc_die; 1435 goto setup_rx_desc_die;
1181 } 1436 }
1182 1437
@@ -1188,10 +1443,7 @@ setup_rx_desc_die:
1188 DPRINTK(PROBE, ERR, 1443 DPRINTK(PROBE, ERR,
1189 "Unable to allocate aligned memory " 1444 "Unable to allocate aligned memory "
1190 "for the receive descriptor ring\n"); 1445 "for the receive descriptor ring\n");
1191 vfree(rxdr->buffer_info); 1446 goto setup_rx_desc_die;
1192 kfree(rxdr->ps_page);
1193 kfree(rxdr->ps_page_dma);
1194 return -ENOMEM;
1195 } else { 1447 } else {
1196 /* Free old allocation, new allocation was successful */ 1448 /* Free old allocation, new allocation was successful */
1197 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1449 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
@@ -1206,15 +1458,48 @@ setup_rx_desc_die:
1206} 1458}
1207 1459
1208/** 1460/**
1461 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1462 * (Descriptors) for all queues
1463 * @adapter: board private structure
1464 *
1465 * If this function returns with an error, then it's possible one or
1466 * more of the rings is populated (while the rest are not). It is the
1467 * callers duty to clean those orphaned rings.
1468 *
1469 * Return 0 on success, negative on failure
1470 **/
1471
1472int
1473e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1474{
1475 int i, err = 0;
1476
1477 for (i = 0; i < adapter->num_queues; i++) {
1478 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1479 if (err) {
1480 DPRINTK(PROBE, ERR,
1481 "Allocation for Rx Queue %u failed\n", i);
1482 break;
1483 }
1484 }
1485
1486 return err;
1487}
1488
1489/**
1209 * e1000_setup_rctl - configure the receive control registers 1490 * e1000_setup_rctl - configure the receive control registers
1210 * @adapter: Board private structure 1491 * @adapter: Board private structure
1211 **/ 1492 **/
1212 1493#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1494 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
1213static void 1495static void
1214e1000_setup_rctl(struct e1000_adapter *adapter) 1496e1000_setup_rctl(struct e1000_adapter *adapter)
1215{ 1497{
1216 uint32_t rctl, rfctl; 1498 uint32_t rctl, rfctl;
1217 uint32_t psrctl = 0; 1499 uint32_t psrctl = 0;
1500#ifdef CONFIG_E1000_PACKET_SPLIT
1501 uint32_t pages = 0;
1502#endif
1218 1503
1219 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1504 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1220 1505
@@ -1235,7 +1520,7 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1235 rctl |= E1000_RCTL_LPE; 1520 rctl |= E1000_RCTL_LPE;
1236 1521
1237 /* Setup buffer sizes */ 1522 /* Setup buffer sizes */
1238 if(adapter->hw.mac_type == e1000_82573) { 1523 if(adapter->hw.mac_type >= e1000_82571) {
1239 /* We can now specify buffers in 1K increments. 1524 /* We can now specify buffers in 1K increments.
1240 * BSIZE and BSEX are ignored in this case. */ 1525 * BSIZE and BSEX are ignored in this case. */
1241 rctl |= adapter->rx_buffer_len << 0x11; 1526 rctl |= adapter->rx_buffer_len << 0x11;
@@ -1268,11 +1553,14 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1268 * followed by the page buffers. Therefore, skb->data is 1553 * followed by the page buffers. Therefore, skb->data is
1269 * sized to hold the largest protocol header. 1554 * sized to hold the largest protocol header.
1270 */ 1555 */
1271 adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2) 1556 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1272 && (adapter->netdev->mtu 1557 if ((adapter->hw.mac_type > e1000_82547_rev_2) && (pages <= 3) &&
1273 < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0)); 1558 PAGE_SIZE <= 16384)
1559 adapter->rx_ps_pages = pages;
1560 else
1561 adapter->rx_ps_pages = 0;
1274#endif 1562#endif
1275 if(adapter->rx_ps) { 1563 if (adapter->rx_ps_pages) {
1276 /* Configure extra packet-split registers */ 1564 /* Configure extra packet-split registers */
1277 rfctl = E1000_READ_REG(&adapter->hw, RFCTL); 1565 rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
1278 rfctl |= E1000_RFCTL_EXTEN; 1566 rfctl |= E1000_RFCTL_EXTEN;
@@ -1284,12 +1572,19 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1284 1572
1285 psrctl |= adapter->rx_ps_bsize0 >> 1573 psrctl |= adapter->rx_ps_bsize0 >>
1286 E1000_PSRCTL_BSIZE0_SHIFT; 1574 E1000_PSRCTL_BSIZE0_SHIFT;
1287 psrctl |= PAGE_SIZE >> 1575
1288 E1000_PSRCTL_BSIZE1_SHIFT; 1576 switch (adapter->rx_ps_pages) {
1289 psrctl |= PAGE_SIZE << 1577 case 3:
1290 E1000_PSRCTL_BSIZE2_SHIFT; 1578 psrctl |= PAGE_SIZE <<
1291 psrctl |= PAGE_SIZE << 1579 E1000_PSRCTL_BSIZE3_SHIFT;
1292 E1000_PSRCTL_BSIZE3_SHIFT; 1580 case 2:
1581 psrctl |= PAGE_SIZE <<
1582 E1000_PSRCTL_BSIZE2_SHIFT;
1583 case 1:
1584 psrctl |= PAGE_SIZE >>
1585 E1000_PSRCTL_BSIZE1_SHIFT;
1586 break;
1587 }
1293 1588
1294 E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl); 1589 E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
1295 } 1590 }
@@ -1307,91 +1602,181 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
1307static void 1602static void
1308e1000_configure_rx(struct e1000_adapter *adapter) 1603e1000_configure_rx(struct e1000_adapter *adapter)
1309{ 1604{
1310 uint64_t rdba = adapter->rx_ring.dma; 1605 uint64_t rdba;
1311 uint32_t rdlen, rctl, rxcsum; 1606 struct e1000_hw *hw = &adapter->hw;
1607 uint32_t rdlen, rctl, rxcsum, ctrl_ext;
1608#ifdef CONFIG_E1000_MQ
1609 uint32_t reta, mrqc;
1610 int i;
1611#endif
1312 1612
1313 if(adapter->rx_ps) { 1613 if (adapter->rx_ps_pages) {
1314 rdlen = adapter->rx_ring.count * 1614 rdlen = adapter->rx_ring[0].count *
1315 sizeof(union e1000_rx_desc_packet_split); 1615 sizeof(union e1000_rx_desc_packet_split);
1316 adapter->clean_rx = e1000_clean_rx_irq_ps; 1616 adapter->clean_rx = e1000_clean_rx_irq_ps;
1317 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; 1617 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
1318 } else { 1618 } else {
1319 rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc); 1619 rdlen = adapter->rx_ring[0].count *
1620 sizeof(struct e1000_rx_desc);
1320 adapter->clean_rx = e1000_clean_rx_irq; 1621 adapter->clean_rx = e1000_clean_rx_irq;
1321 adapter->alloc_rx_buf = e1000_alloc_rx_buffers; 1622 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1322 } 1623 }
1323 1624
1324 /* disable receives while setting up the descriptors */ 1625 /* disable receives while setting up the descriptors */
1325 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1626 rctl = E1000_READ_REG(hw, RCTL);
1326 E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN); 1627 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
1327 1628
1328 /* set the Receive Delay Timer Register */ 1629 /* set the Receive Delay Timer Register */
1329 E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay); 1630 E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
1330 1631
1331 if(adapter->hw.mac_type >= e1000_82540) { 1632 if (hw->mac_type >= e1000_82540) {
1332 E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay); 1633 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
1333 if(adapter->itr > 1) 1634 if(adapter->itr > 1)
1334 E1000_WRITE_REG(&adapter->hw, ITR, 1635 E1000_WRITE_REG(hw, ITR,
1335 1000000000 / (adapter->itr * 256)); 1636 1000000000 / (adapter->itr * 256));
1336 } 1637 }
1337 1638
1338 /* Setup the Base and Length of the Rx Descriptor Ring */ 1639 if (hw->mac_type >= e1000_82571) {
1339 E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL)); 1640 /* Reset delay timers after every interrupt */
1340 E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32)); 1641 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
1642 ctrl_ext |= E1000_CTRL_EXT_CANC;
1643 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
1644 E1000_WRITE_FLUSH(hw);
1645 }
1646
1647 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1648 * the Base and Length of the Rx Descriptor Ring */
1649 switch (adapter->num_queues) {
1650#ifdef CONFIG_E1000_MQ
1651 case 2:
1652 rdba = adapter->rx_ring[1].dma;
1653 E1000_WRITE_REG(hw, RDBAL1, (rdba & 0x00000000ffffffffULL));
1654 E1000_WRITE_REG(hw, RDBAH1, (rdba >> 32));
1655 E1000_WRITE_REG(hw, RDLEN1, rdlen);
1656 E1000_WRITE_REG(hw, RDH1, 0);
1657 E1000_WRITE_REG(hw, RDT1, 0);
1658 adapter->rx_ring[1].rdh = E1000_RDH1;
1659 adapter->rx_ring[1].rdt = E1000_RDT1;
1660 /* Fall Through */
1661#endif
1662 case 1:
1663 default:
1664 rdba = adapter->rx_ring[0].dma;
1665 E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
1666 E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
1667 E1000_WRITE_REG(hw, RDLEN, rdlen);
1668 E1000_WRITE_REG(hw, RDH, 0);
1669 E1000_WRITE_REG(hw, RDT, 0);
1670 adapter->rx_ring[0].rdh = E1000_RDH;
1671 adapter->rx_ring[0].rdt = E1000_RDT;
1672 break;
1673 }
1674
1675#ifdef CONFIG_E1000_MQ
1676 if (adapter->num_queues > 1) {
1677 uint32_t random[10];
1678
1679 get_random_bytes(&random[0], 40);
1341 1680
1342 E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen); 1681 if (hw->mac_type <= e1000_82572) {
1682 E1000_WRITE_REG(hw, RSSIR, 0);
1683 E1000_WRITE_REG(hw, RSSIM, 0);
1684 }
1685
1686 switch (adapter->num_queues) {
1687 case 2:
1688 default:
1689 reta = 0x00800080;
1690 mrqc = E1000_MRQC_ENABLE_RSS_2Q;
1691 break;
1692 }
1343 1693
1344 /* Setup the HW Rx Head and Tail Descriptor Pointers */ 1694 /* Fill out redirection table */
1345 E1000_WRITE_REG(&adapter->hw, RDH, 0); 1695 for (i = 0; i < 32; i++)
1346 E1000_WRITE_REG(&adapter->hw, RDT, 0); 1696 E1000_WRITE_REG_ARRAY(hw, RETA, i, reta);
1697 /* Fill out hash function seeds */
1698 for (i = 0; i < 10; i++)
1699 E1000_WRITE_REG_ARRAY(hw, RSSRK, i, random[i]);
1700
1701 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
1702 E1000_MRQC_RSS_FIELD_IPV4_TCP);
1703 E1000_WRITE_REG(hw, MRQC, mrqc);
1704 }
1705
1706 /* Multiqueue and packet checksumming are mutually exclusive. */
1707 if (hw->mac_type >= e1000_82571) {
1708 rxcsum = E1000_READ_REG(hw, RXCSUM);
1709 rxcsum |= E1000_RXCSUM_PCSD;
1710 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
1711 }
1712
1713#else
1347 1714
1348 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 1715 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1349 if(adapter->hw.mac_type >= e1000_82543) { 1716 if (hw->mac_type >= e1000_82543) {
1350 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); 1717 rxcsum = E1000_READ_REG(hw, RXCSUM);
1351 if(adapter->rx_csum == TRUE) { 1718 if(adapter->rx_csum == TRUE) {
1352 rxcsum |= E1000_RXCSUM_TUOFL; 1719 rxcsum |= E1000_RXCSUM_TUOFL;
1353 1720
1354 /* Enable 82573 IPv4 payload checksum for UDP fragments 1721 /* Enable 82571 IPv4 payload checksum for UDP fragments
1355 * Must be used in conjunction with packet-split. */ 1722 * Must be used in conjunction with packet-split. */
1356 if((adapter->hw.mac_type > e1000_82547_rev_2) && 1723 if ((hw->mac_type >= e1000_82571) &&
1357 (adapter->rx_ps)) { 1724 (adapter->rx_ps_pages)) {
1358 rxcsum |= E1000_RXCSUM_IPPCSE; 1725 rxcsum |= E1000_RXCSUM_IPPCSE;
1359 } 1726 }
1360 } else { 1727 } else {
1361 rxcsum &= ~E1000_RXCSUM_TUOFL; 1728 rxcsum &= ~E1000_RXCSUM_TUOFL;
1362 /* don't need to clear IPPCSE as it defaults to 0 */ 1729 /* don't need to clear IPPCSE as it defaults to 0 */
1363 } 1730 }
1364 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum); 1731 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
1365 } 1732 }
1733#endif /* CONFIG_E1000_MQ */
1366 1734
1367 if (adapter->hw.mac_type == e1000_82573) 1735 if (hw->mac_type == e1000_82573)
1368 E1000_WRITE_REG(&adapter->hw, ERT, 0x0100); 1736 E1000_WRITE_REG(hw, ERT, 0x0100);
1369 1737
1370 /* Enable Receives */ 1738 /* Enable Receives */
1371 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 1739 E1000_WRITE_REG(hw, RCTL, rctl);
1372} 1740}
1373 1741
1374/** 1742/**
1375 * e1000_free_tx_resources - Free Tx Resources 1743 * e1000_free_tx_resources - Free Tx Resources per Queue
1376 * @adapter: board private structure 1744 * @adapter: board private structure
1745 * @tx_ring: Tx descriptor ring for a specific queue
1377 * 1746 *
1378 * Free all transmit software resources 1747 * Free all transmit software resources
1379 **/ 1748 **/
1380 1749
1381void 1750void
1382e1000_free_tx_resources(struct e1000_adapter *adapter) 1751e1000_free_tx_resources(struct e1000_adapter *adapter,
1752 struct e1000_tx_ring *tx_ring)
1383{ 1753{
1384 struct pci_dev *pdev = adapter->pdev; 1754 struct pci_dev *pdev = adapter->pdev;
1385 1755
1386 e1000_clean_tx_ring(adapter); 1756 e1000_clean_tx_ring(adapter, tx_ring);
1387 1757
1388 vfree(adapter->tx_ring.buffer_info); 1758 vfree(tx_ring->buffer_info);
1389 adapter->tx_ring.buffer_info = NULL; 1759 tx_ring->buffer_info = NULL;
1390 1760
1391 pci_free_consistent(pdev, adapter->tx_ring.size, 1761 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
1392 adapter->tx_ring.desc, adapter->tx_ring.dma); 1762
1763 tx_ring->desc = NULL;
1764}
1765
1766/**
1767 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1768 * @adapter: board private structure
1769 *
1770 * Free all transmit software resources
1771 **/
1772
1773void
1774e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1775{
1776 int i;
1393 1777
1394 adapter->tx_ring.desc = NULL; 1778 for (i = 0; i < adapter->num_queues; i++)
1779 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1395} 1780}
1396 1781
1397static inline void 1782static inline void
@@ -1414,21 +1799,22 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1414/** 1799/**
1415 * e1000_clean_tx_ring - Free Tx Buffers 1800 * e1000_clean_tx_ring - Free Tx Buffers
1416 * @adapter: board private structure 1801 * @adapter: board private structure
1802 * @tx_ring: ring to be cleaned
1417 **/ 1803 **/
1418 1804
1419static void 1805static void
1420e1000_clean_tx_ring(struct e1000_adapter *adapter) 1806e1000_clean_tx_ring(struct e1000_adapter *adapter,
1807 struct e1000_tx_ring *tx_ring)
1421{ 1808{
1422 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1423 struct e1000_buffer *buffer_info; 1809 struct e1000_buffer *buffer_info;
1424 unsigned long size; 1810 unsigned long size;
1425 unsigned int i; 1811 unsigned int i;
1426 1812
1427 /* Free all the Tx ring sk_buffs */ 1813 /* Free all the Tx ring sk_buffs */
1428 1814
1429 if (likely(adapter->previous_buffer_info.skb != NULL)) { 1815 if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
1430 e1000_unmap_and_free_tx_resource(adapter, 1816 e1000_unmap_and_free_tx_resource(adapter,
1431 &adapter->previous_buffer_info); 1817 &tx_ring->previous_buffer_info);
1432 } 1818 }
1433 1819
1434 for(i = 0; i < tx_ring->count; i++) { 1820 for(i = 0; i < tx_ring->count; i++) {
@@ -1446,24 +1832,39 @@ e1000_clean_tx_ring(struct e1000_adapter *adapter)
1446 tx_ring->next_to_use = 0; 1832 tx_ring->next_to_use = 0;
1447 tx_ring->next_to_clean = 0; 1833 tx_ring->next_to_clean = 0;
1448 1834
1449 E1000_WRITE_REG(&adapter->hw, TDH, 0); 1835 writel(0, adapter->hw.hw_addr + tx_ring->tdh);
1450 E1000_WRITE_REG(&adapter->hw, TDT, 0); 1836 writel(0, adapter->hw.hw_addr + tx_ring->tdt);
1837}
1838
1839/**
1840 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
1841 * @adapter: board private structure
1842 **/
1843
1844static void
1845e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
1846{
1847 int i;
1848
1849 for (i = 0; i < adapter->num_queues; i++)
1850 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1451} 1851}
1452 1852
1453/** 1853/**
1454 * e1000_free_rx_resources - Free Rx Resources 1854 * e1000_free_rx_resources - Free Rx Resources
1455 * @adapter: board private structure 1855 * @adapter: board private structure
1856 * @rx_ring: ring to clean the resources from
1456 * 1857 *
1457 * Free all receive software resources 1858 * Free all receive software resources
1458 **/ 1859 **/
1459 1860
1460void 1861void
1461e1000_free_rx_resources(struct e1000_adapter *adapter) 1862e1000_free_rx_resources(struct e1000_adapter *adapter,
1863 struct e1000_rx_ring *rx_ring)
1462{ 1864{
1463 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1464 struct pci_dev *pdev = adapter->pdev; 1865 struct pci_dev *pdev = adapter->pdev;
1465 1866
1466 e1000_clean_rx_ring(adapter); 1867 e1000_clean_rx_ring(adapter, rx_ring);
1467 1868
1468 vfree(rx_ring->buffer_info); 1869 vfree(rx_ring->buffer_info);
1469 rx_ring->buffer_info = NULL; 1870 rx_ring->buffer_info = NULL;
@@ -1478,14 +1879,31 @@ e1000_free_rx_resources(struct e1000_adapter *adapter)
1478} 1879}
1479 1880
1480/** 1881/**
1481 * e1000_clean_rx_ring - Free Rx Buffers 1882 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
1883 * @adapter: board private structure
1884 *
1885 * Free all receive software resources
1886 **/
1887
1888void
1889e1000_free_all_rx_resources(struct e1000_adapter *adapter)
1890{
1891 int i;
1892
1893 for (i = 0; i < adapter->num_queues; i++)
1894 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
1895}
1896
1897/**
1898 * e1000_clean_rx_ring - Free Rx Buffers per Queue
1482 * @adapter: board private structure 1899 * @adapter: board private structure
1900 * @rx_ring: ring to free buffers from
1483 **/ 1901 **/
1484 1902
1485static void 1903static void
1486e1000_clean_rx_ring(struct e1000_adapter *adapter) 1904e1000_clean_rx_ring(struct e1000_adapter *adapter,
1905 struct e1000_rx_ring *rx_ring)
1487{ 1906{
1488 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1489 struct e1000_buffer *buffer_info; 1907 struct e1000_buffer *buffer_info;
1490 struct e1000_ps_page *ps_page; 1908 struct e1000_ps_page *ps_page;
1491 struct e1000_ps_page_dma *ps_page_dma; 1909 struct e1000_ps_page_dma *ps_page_dma;
@@ -1508,7 +1926,7 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
1508 dev_kfree_skb(buffer_info->skb); 1926 dev_kfree_skb(buffer_info->skb);
1509 buffer_info->skb = NULL; 1927 buffer_info->skb = NULL;
1510 1928
1511 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 1929 for(j = 0; j < adapter->rx_ps_pages; j++) {
1512 if(!ps_page->ps_page[j]) break; 1930 if(!ps_page->ps_page[j]) break;
1513 pci_unmap_single(pdev, 1931 pci_unmap_single(pdev,
1514 ps_page_dma->ps_page_dma[j], 1932 ps_page_dma->ps_page_dma[j],
@@ -1534,8 +1952,22 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter)
1534 rx_ring->next_to_clean = 0; 1952 rx_ring->next_to_clean = 0;
1535 rx_ring->next_to_use = 0; 1953 rx_ring->next_to_use = 0;
1536 1954
1537 E1000_WRITE_REG(&adapter->hw, RDH, 0); 1955 writel(0, adapter->hw.hw_addr + rx_ring->rdh);
1538 E1000_WRITE_REG(&adapter->hw, RDT, 0); 1956 writel(0, adapter->hw.hw_addr + rx_ring->rdt);
1957}
1958
1959/**
1960 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
1961 * @adapter: board private structure
1962 **/
1963
1964static void
1965e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
1966{
1967 int i;
1968
1969 for (i = 0; i < adapter->num_queues; i++)
1970 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1539} 1971}
1540 1972
1541/* The 82542 2.0 (revision 2) needs to have the receive unit in reset 1973/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
@@ -1556,7 +1988,7 @@ e1000_enter_82542_rst(struct e1000_adapter *adapter)
1556 mdelay(5); 1988 mdelay(5);
1557 1989
1558 if(netif_running(netdev)) 1990 if(netif_running(netdev))
1559 e1000_clean_rx_ring(adapter); 1991 e1000_clean_all_rx_rings(adapter);
1560} 1992}
1561 1993
1562static void 1994static void
@@ -1576,7 +2008,7 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
1576 2008
1577 if(netif_running(netdev)) { 2009 if(netif_running(netdev)) {
1578 e1000_configure_rx(adapter); 2010 e1000_configure_rx(adapter);
1579 e1000_alloc_rx_buffers(adapter); 2011 e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]);
1580 } 2012 }
1581} 2013}
1582 2014
@@ -1607,6 +2039,22 @@ e1000_set_mac(struct net_device *netdev, void *p)
1607 2039
1608 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0); 2040 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1609 2041
2042 /* With 82571 controllers, LAA may be overwritten (with the default)
2043 * due to controller reset from the other port. */
2044 if (adapter->hw.mac_type == e1000_82571) {
2045 /* activate the work around */
2046 adapter->hw.laa_is_present = 1;
2047
2048 /* Hold a copy of the LAA in RAR[14] This is done so that
2049 * between the time RAR[0] gets clobbered and the time it
2050 * gets fixed (in e1000_watchdog), the actual LAA is in one
2051 * of the RARs and no incoming packets directed to this port
2052 * are dropped. Eventaully the LAA will be in RAR[0] and
2053 * RAR[14] */
2054 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2055 E1000_RAR_ENTRIES - 1);
2056 }
2057
1610 if(adapter->hw.mac_type == e1000_82542_rev2_0) 2058 if(adapter->hw.mac_type == e1000_82542_rev2_0)
1611 e1000_leave_82542_rst(adapter); 2059 e1000_leave_82542_rst(adapter);
1612 2060
@@ -1629,12 +2077,13 @@ e1000_set_multi(struct net_device *netdev)
1629 struct e1000_adapter *adapter = netdev_priv(netdev); 2077 struct e1000_adapter *adapter = netdev_priv(netdev);
1630 struct e1000_hw *hw = &adapter->hw; 2078 struct e1000_hw *hw = &adapter->hw;
1631 struct dev_mc_list *mc_ptr; 2079 struct dev_mc_list *mc_ptr;
1632 unsigned long flags;
1633 uint32_t rctl; 2080 uint32_t rctl;
1634 uint32_t hash_value; 2081 uint32_t hash_value;
1635 int i; 2082 int i, rar_entries = E1000_RAR_ENTRIES;
1636 2083
1637 spin_lock_irqsave(&adapter->tx_lock, flags); 2084 /* reserve RAR[14] for LAA over-write work-around */
2085 if (adapter->hw.mac_type == e1000_82571)
2086 rar_entries--;
1638 2087
1639 /* Check for Promiscuous and All Multicast modes */ 2088 /* Check for Promiscuous and All Multicast modes */
1640 2089
@@ -1659,11 +2108,12 @@ e1000_set_multi(struct net_device *netdev)
1659 /* load the first 14 multicast address into the exact filters 1-14 2108 /* load the first 14 multicast address into the exact filters 1-14
1660 * RAR 0 is used for the station MAC adddress 2109 * RAR 0 is used for the station MAC adddress
1661 * if there are not 14 addresses, go ahead and clear the filters 2110 * if there are not 14 addresses, go ahead and clear the filters
2111 * -- with 82571 controllers only 0-13 entries are filled here
1662 */ 2112 */
1663 mc_ptr = netdev->mc_list; 2113 mc_ptr = netdev->mc_list;
1664 2114
1665 for(i = 1; i < E1000_RAR_ENTRIES; i++) { 2115 for(i = 1; i < rar_entries; i++) {
1666 if(mc_ptr) { 2116 if (mc_ptr) {
1667 e1000_rar_set(hw, mc_ptr->dmi_addr, i); 2117 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
1668 mc_ptr = mc_ptr->next; 2118 mc_ptr = mc_ptr->next;
1669 } else { 2119 } else {
@@ -1686,8 +2136,6 @@ e1000_set_multi(struct net_device *netdev)
1686 2136
1687 if(hw->mac_type == e1000_82542_rev2_0) 2137 if(hw->mac_type == e1000_82542_rev2_0)
1688 e1000_leave_82542_rst(adapter); 2138 e1000_leave_82542_rst(adapter);
1689
1690 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1691} 2139}
1692 2140
1693/* Need to wait a few seconds after link up to get diagnostic information from 2141/* Need to wait a few seconds after link up to get diagnostic information from
@@ -1759,7 +2207,7 @@ static void
1759e1000_watchdog_task(struct e1000_adapter *adapter) 2207e1000_watchdog_task(struct e1000_adapter *adapter)
1760{ 2208{
1761 struct net_device *netdev = adapter->netdev; 2209 struct net_device *netdev = adapter->netdev;
1762 struct e1000_desc_ring *txdr = &adapter->tx_ring; 2210 struct e1000_tx_ring *txdr = &adapter->tx_ring[0];
1763 uint32_t link; 2211 uint32_t link;
1764 2212
1765 e1000_check_for_link(&adapter->hw); 2213 e1000_check_for_link(&adapter->hw);
@@ -1818,8 +2266,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
1818 2266
1819 e1000_update_adaptive(&adapter->hw); 2267 e1000_update_adaptive(&adapter->hw);
1820 2268
1821 if(!netif_carrier_ok(netdev)) { 2269 if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) {
1822 if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { 2270 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1823 /* We've lost link, so the controller stops DMA, 2271 /* We've lost link, so the controller stops DMA,
1824 * but we've got queued Tx work that's never going 2272 * but we've got queued Tx work that's never going
1825 * to get done, so reset controller to flush Tx. 2273 * to get done, so reset controller to flush Tx.
@@ -1847,6 +2295,11 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
1847 /* Force detection of hung controller every watchdog period */ 2295 /* Force detection of hung controller every watchdog period */
1848 adapter->detect_tx_hung = TRUE; 2296 adapter->detect_tx_hung = TRUE;
1849 2297
2298 /* With 82571 controllers, LAA may be overwritten due to controller
2299 * reset from the other port. Set the appropriate LAA in RAR[0] */
2300 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2301 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2302
1850 /* Reset the timer */ 2303 /* Reset the timer */
1851 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 2304 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1852} 2305}
@@ -1859,7 +2312,8 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
1859#define E1000_TX_FLAGS_VLAN_SHIFT 16 2312#define E1000_TX_FLAGS_VLAN_SHIFT 16
1860 2313
1861static inline int 2314static inline int
1862e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb) 2315e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2316 struct sk_buff *skb)
1863{ 2317{
1864#ifdef NETIF_F_TSO 2318#ifdef NETIF_F_TSO
1865 struct e1000_context_desc *context_desc; 2319 struct e1000_context_desc *context_desc;
@@ -1910,8 +2364,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1910 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 2364 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
1911 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 2365 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
1912 2366
1913 i = adapter->tx_ring.next_to_use; 2367 i = tx_ring->next_to_use;
1914 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); 2368 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
1915 2369
1916 context_desc->lower_setup.ip_fields.ipcss = ipcss; 2370 context_desc->lower_setup.ip_fields.ipcss = ipcss;
1917 context_desc->lower_setup.ip_fields.ipcso = ipcso; 2371 context_desc->lower_setup.ip_fields.ipcso = ipcso;
@@ -1923,8 +2377,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1923 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; 2377 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
1924 context_desc->cmd_and_length = cpu_to_le32(cmd_length); 2378 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
1925 2379
1926 if(++i == adapter->tx_ring.count) i = 0; 2380 if (++i == tx_ring->count) i = 0;
1927 adapter->tx_ring.next_to_use = i; 2381 tx_ring->next_to_use = i;
1928 2382
1929 return 1; 2383 return 1;
1930 } 2384 }
@@ -1934,7 +2388,8 @@ e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1934} 2388}
1935 2389
1936static inline boolean_t 2390static inline boolean_t
1937e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) 2391e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2392 struct sk_buff *skb)
1938{ 2393{
1939 struct e1000_context_desc *context_desc; 2394 struct e1000_context_desc *context_desc;
1940 unsigned int i; 2395 unsigned int i;
@@ -1943,8 +2398,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1943 if(likely(skb->ip_summed == CHECKSUM_HW)) { 2398 if(likely(skb->ip_summed == CHECKSUM_HW)) {
1944 css = skb->h.raw - skb->data; 2399 css = skb->h.raw - skb->data;
1945 2400
1946 i = adapter->tx_ring.next_to_use; 2401 i = tx_ring->next_to_use;
1947 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i); 2402 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
1948 2403
1949 context_desc->upper_setup.tcp_fields.tucss = css; 2404 context_desc->upper_setup.tcp_fields.tucss = css;
1950 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum; 2405 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
@@ -1952,8 +2407,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1952 context_desc->tcp_seg_setup.data = 0; 2407 context_desc->tcp_seg_setup.data = 0;
1953 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); 2408 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
1954 2409
1955 if(unlikely(++i == adapter->tx_ring.count)) i = 0; 2410 if (unlikely(++i == tx_ring->count)) i = 0;
1956 adapter->tx_ring.next_to_use = i; 2411 tx_ring->next_to_use = i;
1957 2412
1958 return TRUE; 2413 return TRUE;
1959 } 2414 }
@@ -1965,11 +2420,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1965#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) 2420#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
1966 2421
1967static inline int 2422static inline int
1968e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb, 2423e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
1969 unsigned int first, unsigned int max_per_txd, 2424 struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
1970 unsigned int nr_frags, unsigned int mss) 2425 unsigned int nr_frags, unsigned int mss)
1971{ 2426{
1972 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1973 struct e1000_buffer *buffer_info; 2427 struct e1000_buffer *buffer_info;
1974 unsigned int len = skb->len; 2428 unsigned int len = skb->len;
1975 unsigned int offset = 0, size, count = 0, i; 2429 unsigned int offset = 0, size, count = 0, i;
@@ -2065,9 +2519,9 @@ e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
2065} 2519}
2066 2520
2067static inline void 2521static inline void
2068e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags) 2522e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2523 int tx_flags, int count)
2069{ 2524{
2070 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2071 struct e1000_tx_desc *tx_desc = NULL; 2525 struct e1000_tx_desc *tx_desc = NULL;
2072 struct e1000_buffer *buffer_info; 2526 struct e1000_buffer *buffer_info;
2073 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 2527 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -2113,7 +2567,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
2113 wmb(); 2567 wmb();
2114 2568
2115 tx_ring->next_to_use = i; 2569 tx_ring->next_to_use = i;
2116 E1000_WRITE_REG(&adapter->hw, TDT, i); 2570 writel(i, adapter->hw.hw_addr + tx_ring->tdt);
2117} 2571}
2118 2572
2119/** 2573/**
@@ -2206,6 +2660,7 @@ static int
2206e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2660e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2207{ 2661{
2208 struct e1000_adapter *adapter = netdev_priv(netdev); 2662 struct e1000_adapter *adapter = netdev_priv(netdev);
2663 struct e1000_tx_ring *tx_ring;
2209 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 2664 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
2210 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 2665 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
2211 unsigned int tx_flags = 0; 2666 unsigned int tx_flags = 0;
@@ -2218,7 +2673,13 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2218 unsigned int f; 2673 unsigned int f;
2219 len -= skb->data_len; 2674 len -= skb->data_len;
2220 2675
2221 if(unlikely(skb->len <= 0)) { 2676#ifdef CONFIG_E1000_MQ
2677 tx_ring = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
2678#else
2679 tx_ring = adapter->tx_ring;
2680#endif
2681
2682 if (unlikely(skb->len <= 0)) {
2222 dev_kfree_skb_any(skb); 2683 dev_kfree_skb_any(skb);
2223 return NETDEV_TX_OK; 2684 return NETDEV_TX_OK;
2224 } 2685 }
@@ -2262,21 +2723,42 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2262 if(adapter->pcix_82544) 2723 if(adapter->pcix_82544)
2263 count += nr_frags; 2724 count += nr_frags;
2264 2725
2265 local_irq_save(flags); 2726#ifdef NETIF_F_TSO
2266 if (!spin_trylock(&adapter->tx_lock)) { 2727 /* TSO Workaround for 82571/2 Controllers -- if skb->data
2267 /* Collision - tell upper layer to requeue */ 2728 * points to just header, pull a few bytes of payload from
2268 local_irq_restore(flags); 2729 * frags into skb->data */
2269 return NETDEV_TX_LOCKED; 2730 if (skb_shinfo(skb)->tso_size) {
2270 } 2731 uint8_t hdr_len;
2732 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2733 if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
2734 (adapter->hw.mac_type == e1000_82571 ||
2735 adapter->hw.mac_type == e1000_82572)) {
2736 unsigned int pull_size;
2737 pull_size = min((unsigned int)4, skb->data_len);
2738 if (!__pskb_pull_tail(skb, pull_size)) {
2739 printk(KERN_ERR "__pskb_pull_tail failed.\n");
2740 dev_kfree_skb_any(skb);
2741 return -EFAULT;
2742 }
2743 }
2744 }
2745#endif
2746
2271 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) 2747 if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2272 e1000_transfer_dhcp_info(adapter, skb); 2748 e1000_transfer_dhcp_info(adapter, skb);
2273 2749
2750 local_irq_save(flags);
2751 if (!spin_trylock(&tx_ring->tx_lock)) {
2752 /* Collision - tell upper layer to requeue */
2753 local_irq_restore(flags);
2754 return NETDEV_TX_LOCKED;
2755 }
2274 2756
2275 /* need: count + 2 desc gap to keep tail from touching 2757 /* need: count + 2 desc gap to keep tail from touching
2276 * head, otherwise try next time */ 2758 * head, otherwise try next time */
2277 if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) { 2759 if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
2278 netif_stop_queue(netdev); 2760 netif_stop_queue(netdev);
2279 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2761 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2280 return NETDEV_TX_BUSY; 2762 return NETDEV_TX_BUSY;
2281 } 2763 }
2282 2764
@@ -2284,7 +2766,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2284 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) { 2766 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2285 netif_stop_queue(netdev); 2767 netif_stop_queue(netdev);
2286 mod_timer(&adapter->tx_fifo_stall_timer, jiffies); 2768 mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2287 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2769 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2288 return NETDEV_TX_BUSY; 2770 return NETDEV_TX_BUSY;
2289 } 2771 }
2290 } 2772 }
@@ -2294,37 +2776,37 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2294 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); 2776 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2295 } 2777 }
2296 2778
2297 first = adapter->tx_ring.next_to_use; 2779 first = tx_ring->next_to_use;
2298 2780
2299 tso = e1000_tso(adapter, skb); 2781 tso = e1000_tso(adapter, tx_ring, skb);
2300 if (tso < 0) { 2782 if (tso < 0) {
2301 dev_kfree_skb_any(skb); 2783 dev_kfree_skb_any(skb);
2302 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2784 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2303 return NETDEV_TX_OK; 2785 return NETDEV_TX_OK;
2304 } 2786 }
2305 2787
2306 if (likely(tso)) 2788 if (likely(tso))
2307 tx_flags |= E1000_TX_FLAGS_TSO; 2789 tx_flags |= E1000_TX_FLAGS_TSO;
2308 else if(likely(e1000_tx_csum(adapter, skb))) 2790 else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
2309 tx_flags |= E1000_TX_FLAGS_CSUM; 2791 tx_flags |= E1000_TX_FLAGS_CSUM;
2310 2792
2311 /* Old method was to assume IPv4 packet by default if TSO was enabled. 2793 /* Old method was to assume IPv4 packet by default if TSO was enabled.
2312 * 82573 hardware supports TSO capabilities for IPv6 as well... 2794 * 82571 hardware supports TSO capabilities for IPv6 as well...
2313 * no longer assume, we must. */ 2795 * no longer assume, we must. */
2314 if(likely(skb->protocol == ntohs(ETH_P_IP))) 2796 if (likely(skb->protocol == ntohs(ETH_P_IP)))
2315 tx_flags |= E1000_TX_FLAGS_IPV4; 2797 tx_flags |= E1000_TX_FLAGS_IPV4;
2316 2798
2317 e1000_tx_queue(adapter, 2799 e1000_tx_queue(adapter, tx_ring, tx_flags,
2318 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss), 2800 e1000_tx_map(adapter, tx_ring, skb, first,
2319 tx_flags); 2801 max_per_txd, nr_frags, mss));
2320 2802
2321 netdev->trans_start = jiffies; 2803 netdev->trans_start = jiffies;
2322 2804
2323 /* Make sure there is space in the ring for the next send. */ 2805 /* Make sure there is space in the ring for the next send. */
2324 if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2)) 2806 if (unlikely(E1000_DESC_UNUSED(tx_ring) < MAX_SKB_FRAGS + 2))
2325 netif_stop_queue(netdev); 2807 netif_stop_queue(netdev);
2326 2808
2327 spin_unlock_irqrestore(&adapter->tx_lock, flags); 2809 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2328 return NETDEV_TX_OK; 2810 return NETDEV_TX_OK;
2329} 2811}
2330 2812
@@ -2388,9 +2870,18 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
2388 return -EINVAL; 2870 return -EINVAL;
2389 } 2871 }
2390 2872
2391#define MAX_STD_JUMBO_FRAME_SIZE 9216 2873#define MAX_STD_JUMBO_FRAME_SIZE 9234
2392 /* might want this to be bigger enum check... */ 2874 /* might want this to be bigger enum check... */
2393 if (adapter->hw.mac_type == e1000_82573 && 2875 /* 82571 controllers limit jumbo frame size to 10500 bytes */
2876 if ((adapter->hw.mac_type == e1000_82571 ||
2877 adapter->hw.mac_type == e1000_82572) &&
2878 max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2879 DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
2880 "on 82571 and 82572 controllers.\n");
2881 return -EINVAL;
2882 }
2883
2884 if(adapter->hw.mac_type == e1000_82573 &&
2394 max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { 2885 max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2395 DPRINTK(PROBE, ERR, "Jumbo Frames not supported " 2886 DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2396 "on 82573\n"); 2887 "on 82573\n");
@@ -2578,6 +3069,29 @@ e1000_update_stats(struct e1000_adapter *adapter)
2578 spin_unlock_irqrestore(&adapter->stats_lock, flags); 3069 spin_unlock_irqrestore(&adapter->stats_lock, flags);
2579} 3070}
2580 3071
3072#ifdef CONFIG_E1000_MQ
3073void
3074e1000_rx_schedule(void *data)
3075{
3076 struct net_device *poll_dev, *netdev = data;
3077 struct e1000_adapter *adapter = netdev->priv;
3078 int this_cpu = get_cpu();
3079
3080 poll_dev = *per_cpu_ptr(adapter->cpu_netdev, this_cpu);
3081 if (poll_dev == NULL) {
3082 put_cpu();
3083 return;
3084 }
3085
3086 if (likely(netif_rx_schedule_prep(poll_dev)))
3087 __netif_rx_schedule(poll_dev);
3088 else
3089 e1000_irq_enable(adapter);
3090
3091 put_cpu();
3092}
3093#endif
3094
2581/** 3095/**
2582 * e1000_intr - Interrupt Handler 3096 * e1000_intr - Interrupt Handler
2583 * @irq: interrupt number 3097 * @irq: interrupt number
@@ -2592,8 +3106,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2592 struct e1000_adapter *adapter = netdev_priv(netdev); 3106 struct e1000_adapter *adapter = netdev_priv(netdev);
2593 struct e1000_hw *hw = &adapter->hw; 3107 struct e1000_hw *hw = &adapter->hw;
2594 uint32_t icr = E1000_READ_REG(hw, ICR); 3108 uint32_t icr = E1000_READ_REG(hw, ICR);
2595#ifndef CONFIG_E1000_NAPI 3109#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI)
2596 unsigned int i; 3110 int i;
2597#endif 3111#endif
2598 3112
2599 if(unlikely(!icr)) 3113 if(unlikely(!icr))
@@ -2605,17 +3119,31 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2605 } 3119 }
2606 3120
2607#ifdef CONFIG_E1000_NAPI 3121#ifdef CONFIG_E1000_NAPI
2608 if(likely(netif_rx_schedule_prep(netdev))) { 3122 atomic_inc(&adapter->irq_sem);
2609 3123 E1000_WRITE_REG(hw, IMC, ~0);
2610 /* Disable interrupts and register for poll. The flush 3124 E1000_WRITE_FLUSH(hw);
2611 of the posted write is intentionally left out. 3125#ifdef CONFIG_E1000_MQ
2612 */ 3126 if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
2613 3127 cpu_set(adapter->cpu_for_queue[0],
2614 atomic_inc(&adapter->irq_sem); 3128 adapter->rx_sched_call_data.cpumask);
2615 E1000_WRITE_REG(hw, IMC, ~0); 3129 for (i = 1; i < adapter->num_queues; i++) {
2616 __netif_rx_schedule(netdev); 3130 cpu_set(adapter->cpu_for_queue[i],
3131 adapter->rx_sched_call_data.cpumask);
3132 atomic_inc(&adapter->irq_sem);
3133 }
3134 atomic_set(&adapter->rx_sched_call_data.count, i);
3135 smp_call_async_mask(&adapter->rx_sched_call_data);
3136 } else {
3137 printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
2617 } 3138 }
2618#else 3139#else /* if !CONFIG_E1000_MQ */
3140 if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
3141 __netif_rx_schedule(&adapter->polling_netdev[0]);
3142 else
3143 e1000_irq_enable(adapter);
3144#endif /* CONFIG_E1000_MQ */
3145
3146#else /* if !CONFIG_E1000_NAPI */
2619 /* Writing IMC and IMS is needed for 82547. 3147 /* Writing IMC and IMS is needed for 82547.
2620 Due to Hub Link bus being occupied, an interrupt 3148 Due to Hub Link bus being occupied, an interrupt
2621 de-assertion message is not able to be sent. 3149 de-assertion message is not able to be sent.
@@ -2632,13 +3160,14 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2632 } 3160 }
2633 3161
2634 for(i = 0; i < E1000_MAX_INTR; i++) 3162 for(i = 0; i < E1000_MAX_INTR; i++)
2635 if(unlikely(!adapter->clean_rx(adapter) & 3163 if(unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
2636 !e1000_clean_tx_irq(adapter))) 3164 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
2637 break; 3165 break;
2638 3166
2639 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) 3167 if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
2640 e1000_irq_enable(adapter); 3168 e1000_irq_enable(adapter);
2641#endif 3169
3170#endif /* CONFIG_E1000_NAPI */
2642 3171
2643 return IRQ_HANDLED; 3172 return IRQ_HANDLED;
2644} 3173}
@@ -2650,22 +3179,37 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
2650 **/ 3179 **/
2651 3180
2652static int 3181static int
2653e1000_clean(struct net_device *netdev, int *budget) 3182e1000_clean(struct net_device *poll_dev, int *budget)
2654{ 3183{
2655 struct e1000_adapter *adapter = netdev_priv(netdev); 3184 struct e1000_adapter *adapter;
2656 int work_to_do = min(*budget, netdev->quota); 3185 int work_to_do = min(*budget, poll_dev->quota);
2657 int tx_cleaned; 3186 int tx_cleaned, i = 0, work_done = 0;
2658 int work_done = 0; 3187
3188 /* Must NOT use netdev_priv macro here. */
3189 adapter = poll_dev->priv;
2659 3190
2660 tx_cleaned = e1000_clean_tx_irq(adapter); 3191 /* Keep link state information with original netdev */
2661 adapter->clean_rx(adapter, &work_done, work_to_do); 3192 if (!netif_carrier_ok(adapter->netdev))
3193 goto quit_polling;
3194
3195 while (poll_dev != &adapter->polling_netdev[i]) {
3196 i++;
3197 if (unlikely(i == adapter->num_queues))
3198 BUG();
3199 }
3200
3201 tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
3202 adapter->clean_rx(adapter, &adapter->rx_ring[i],
3203 &work_done, work_to_do);
2662 3204
2663 *budget -= work_done; 3205 *budget -= work_done;
2664 netdev->quota -= work_done; 3206 poll_dev->quota -= work_done;
2665 3207
2666 if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
2667 /* If no Tx and not enough Rx work done, exit the polling mode */ 3208 /* If no Tx and not enough Rx work done, exit the polling mode */
2668 netif_rx_complete(netdev); 3209 if((!tx_cleaned && (work_done == 0)) ||
3210 !netif_running(adapter->netdev)) {
3211quit_polling:
3212 netif_rx_complete(poll_dev);
2669 e1000_irq_enable(adapter); 3213 e1000_irq_enable(adapter);
2670 return 0; 3214 return 0;
2671 } 3215 }
@@ -2680,9 +3224,9 @@ e1000_clean(struct net_device *netdev, int *budget)
2680 **/ 3224 **/
2681 3225
2682static boolean_t 3226static boolean_t
2683e1000_clean_tx_irq(struct e1000_adapter *adapter) 3227e1000_clean_tx_irq(struct e1000_adapter *adapter,
3228 struct e1000_tx_ring *tx_ring)
2684{ 3229{
2685 struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2686 struct net_device *netdev = adapter->netdev; 3230 struct net_device *netdev = adapter->netdev;
2687 struct e1000_tx_desc *tx_desc, *eop_desc; 3231 struct e1000_tx_desc *tx_desc, *eop_desc;
2688 struct e1000_buffer *buffer_info; 3232 struct e1000_buffer *buffer_info;
@@ -2693,12 +3237,12 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2693 eop = tx_ring->buffer_info[i].next_to_watch; 3237 eop = tx_ring->buffer_info[i].next_to_watch;
2694 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3238 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2695 3239
2696 while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 3240 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2697 /* Premature writeback of Tx descriptors clear (free buffers 3241 /* Premature writeback of Tx descriptors clear (free buffers
2698 * and unmap pci_mapping) previous_buffer_info */ 3242 * and unmap pci_mapping) previous_buffer_info */
2699 if (likely(adapter->previous_buffer_info.skb != NULL)) { 3243 if (likely(tx_ring->previous_buffer_info.skb != NULL)) {
2700 e1000_unmap_and_free_tx_resource(adapter, 3244 e1000_unmap_and_free_tx_resource(adapter,
2701 &adapter->previous_buffer_info); 3245 &tx_ring->previous_buffer_info);
2702 } 3246 }
2703 3247
2704 for(cleaned = FALSE; !cleaned; ) { 3248 for(cleaned = FALSE; !cleaned; ) {
@@ -2714,7 +3258,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2714#ifdef NETIF_F_TSO 3258#ifdef NETIF_F_TSO
2715 } else { 3259 } else {
2716 if (cleaned) { 3260 if (cleaned) {
2717 memcpy(&adapter->previous_buffer_info, 3261 memcpy(&tx_ring->previous_buffer_info,
2718 buffer_info, 3262 buffer_info,
2719 sizeof(struct e1000_buffer)); 3263 sizeof(struct e1000_buffer));
2720 memset(buffer_info, 0, 3264 memset(buffer_info, 0,
@@ -2732,6 +3276,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2732 3276
2733 if(unlikely(++i == tx_ring->count)) i = 0; 3277 if(unlikely(++i == tx_ring->count)) i = 0;
2734 } 3278 }
3279
3280 tx_ring->pkt++;
2735 3281
2736 eop = tx_ring->buffer_info[i].next_to_watch; 3282 eop = tx_ring->buffer_info[i].next_to_watch;
2737 eop_desc = E1000_TX_DESC(*tx_ring, eop); 3283 eop_desc = E1000_TX_DESC(*tx_ring, eop);
@@ -2739,15 +3285,15 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2739 3285
2740 tx_ring->next_to_clean = i; 3286 tx_ring->next_to_clean = i;
2741 3287
2742 spin_lock(&adapter->tx_lock); 3288 spin_lock(&tx_ring->tx_lock);
2743 3289
2744 if(unlikely(cleaned && netif_queue_stopped(netdev) && 3290 if(unlikely(cleaned && netif_queue_stopped(netdev) &&
2745 netif_carrier_ok(netdev))) 3291 netif_carrier_ok(netdev)))
2746 netif_wake_queue(netdev); 3292 netif_wake_queue(netdev);
2747 3293
2748 spin_unlock(&adapter->tx_lock); 3294 spin_unlock(&tx_ring->tx_lock);
2749 if(adapter->detect_tx_hung) {
2750 3295
3296 if (adapter->detect_tx_hung) {
2751 /* Detect a transmit hang in hardware, this serializes the 3297 /* Detect a transmit hang in hardware, this serializes the
2752 * check with the clearing of time_stamp and movement of i */ 3298 * check with the clearing of time_stamp and movement of i */
2753 adapter->detect_tx_hung = FALSE; 3299 adapter->detect_tx_hung = FALSE;
@@ -2771,8 +3317,8 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2771 " next_to_watch <%x>\n" 3317 " next_to_watch <%x>\n"
2772 " jiffies <%lx>\n" 3318 " jiffies <%lx>\n"
2773 " next_to_watch.status <%x>\n", 3319 " next_to_watch.status <%x>\n",
2774 E1000_READ_REG(&adapter->hw, TDH), 3320 readl(adapter->hw.hw_addr + tx_ring->tdh),
2775 E1000_READ_REG(&adapter->hw, TDT), 3321 readl(adapter->hw.hw_addr + tx_ring->tdt),
2776 tx_ring->next_to_use, 3322 tx_ring->next_to_use,
2777 i, 3323 i,
2778 (unsigned long long)tx_ring->buffer_info[i].dma, 3324 (unsigned long long)tx_ring->buffer_info[i].dma,
@@ -2784,12 +3330,10 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter)
2784 } 3330 }
2785 } 3331 }
2786#ifdef NETIF_F_TSO 3332#ifdef NETIF_F_TSO
2787 3333 if (unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
2788 if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && 3334 time_after(jiffies, tx_ring->previous_buffer_info.time_stamp + HZ)))
2789 time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
2790 e1000_unmap_and_free_tx_resource( 3335 e1000_unmap_and_free_tx_resource(
2791 adapter, &adapter->previous_buffer_info); 3336 adapter, &tx_ring->previous_buffer_info);
2792
2793#endif 3337#endif
2794 return cleaned; 3338 return cleaned;
2795} 3339}
@@ -2852,13 +3396,14 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
2852 3396
2853static boolean_t 3397static boolean_t
2854#ifdef CONFIG_E1000_NAPI 3398#ifdef CONFIG_E1000_NAPI
2855e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done, 3399e1000_clean_rx_irq(struct e1000_adapter *adapter,
2856 int work_to_do) 3400 struct e1000_rx_ring *rx_ring,
3401 int *work_done, int work_to_do)
2857#else 3402#else
2858e1000_clean_rx_irq(struct e1000_adapter *adapter) 3403e1000_clean_rx_irq(struct e1000_adapter *adapter,
3404 struct e1000_rx_ring *rx_ring)
2859#endif 3405#endif
2860{ 3406{
2861 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2862 struct net_device *netdev = adapter->netdev; 3407 struct net_device *netdev = adapter->netdev;
2863 struct pci_dev *pdev = adapter->pdev; 3408 struct pci_dev *pdev = adapter->pdev;
2864 struct e1000_rx_desc *rx_desc; 3409 struct e1000_rx_desc *rx_desc;
@@ -2944,6 +3489,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter)
2944 } 3489 }
2945#endif /* CONFIG_E1000_NAPI */ 3490#endif /* CONFIG_E1000_NAPI */
2946 netdev->last_rx = jiffies; 3491 netdev->last_rx = jiffies;
3492 rx_ring->pkt++;
2947 3493
2948next_desc: 3494next_desc:
2949 rx_desc->status = 0; 3495 rx_desc->status = 0;
@@ -2953,7 +3499,7 @@ next_desc:
2953 rx_desc = E1000_RX_DESC(*rx_ring, i); 3499 rx_desc = E1000_RX_DESC(*rx_ring, i);
2954 } 3500 }
2955 rx_ring->next_to_clean = i; 3501 rx_ring->next_to_clean = i;
2956 adapter->alloc_rx_buf(adapter); 3502 adapter->alloc_rx_buf(adapter, rx_ring);
2957 3503
2958 return cleaned; 3504 return cleaned;
2959} 3505}
@@ -2965,13 +3511,14 @@ next_desc:
2965 3511
2966static boolean_t 3512static boolean_t
2967#ifdef CONFIG_E1000_NAPI 3513#ifdef CONFIG_E1000_NAPI
2968e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done, 3514e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
2969 int work_to_do) 3515 struct e1000_rx_ring *rx_ring,
3516 int *work_done, int work_to_do)
2970#else 3517#else
2971e1000_clean_rx_irq_ps(struct e1000_adapter *adapter) 3518e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
3519 struct e1000_rx_ring *rx_ring)
2972#endif 3520#endif
2973{ 3521{
2974 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2975 union e1000_rx_desc_packet_split *rx_desc; 3522 union e1000_rx_desc_packet_split *rx_desc;
2976 struct net_device *netdev = adapter->netdev; 3523 struct net_device *netdev = adapter->netdev;
2977 struct pci_dev *pdev = adapter->pdev; 3524 struct pci_dev *pdev = adapter->pdev;
@@ -3027,7 +3574,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
3027 /* Good Receive */ 3574 /* Good Receive */
3028 skb_put(skb, length); 3575 skb_put(skb, length);
3029 3576
3030 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 3577 for(j = 0; j < adapter->rx_ps_pages; j++) {
3031 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j]))) 3578 if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3032 break; 3579 break;
3033 3580
@@ -3048,11 +3595,13 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
3048 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); 3595 rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3049 skb->protocol = eth_type_trans(skb, netdev); 3596 skb->protocol = eth_type_trans(skb, netdev);
3050 3597
3051#ifdef HAVE_RX_ZERO_COPY
3052 if(likely(rx_desc->wb.upper.header_status & 3598 if(likely(rx_desc->wb.upper.header_status &
3053 E1000_RXDPS_HDRSTAT_HDRSP)) 3599 E1000_RXDPS_HDRSTAT_HDRSP)) {
3600 adapter->rx_hdr_split++;
3601#ifdef HAVE_RX_ZERO_COPY
3054 skb_shinfo(skb)->zero_copy = TRUE; 3602 skb_shinfo(skb)->zero_copy = TRUE;
3055#endif 3603#endif
3604 }
3056#ifdef CONFIG_E1000_NAPI 3605#ifdef CONFIG_E1000_NAPI
3057 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) { 3606 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3058 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3607 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
@@ -3071,6 +3620,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
3071 } 3620 }
3072#endif /* CONFIG_E1000_NAPI */ 3621#endif /* CONFIG_E1000_NAPI */
3073 netdev->last_rx = jiffies; 3622 netdev->last_rx = jiffies;
3623 rx_ring->pkt++;
3074 3624
3075next_desc: 3625next_desc:
3076 rx_desc->wb.middle.status_error &= ~0xFF; 3626 rx_desc->wb.middle.status_error &= ~0xFF;
@@ -3081,7 +3631,7 @@ next_desc:
3081 staterr = le32_to_cpu(rx_desc->wb.middle.status_error); 3631 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3082 } 3632 }
3083 rx_ring->next_to_clean = i; 3633 rx_ring->next_to_clean = i;
3084 adapter->alloc_rx_buf(adapter); 3634 adapter->alloc_rx_buf(adapter, rx_ring);
3085 3635
3086 return cleaned; 3636 return cleaned;
3087} 3637}
@@ -3092,9 +3642,9 @@ next_desc:
3092 **/ 3642 **/
3093 3643
3094static void 3644static void
3095e1000_alloc_rx_buffers(struct e1000_adapter *adapter) 3645e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
3646 struct e1000_rx_ring *rx_ring)
3096{ 3647{
3097 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3098 struct net_device *netdev = adapter->netdev; 3648 struct net_device *netdev = adapter->netdev;
3099 struct pci_dev *pdev = adapter->pdev; 3649 struct pci_dev *pdev = adapter->pdev;
3100 struct e1000_rx_desc *rx_desc; 3650 struct e1000_rx_desc *rx_desc;
@@ -3178,7 +3728,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
3178 * applicable for weak-ordered memory model archs, 3728 * applicable for weak-ordered memory model archs,
3179 * such as IA-64). */ 3729 * such as IA-64). */
3180 wmb(); 3730 wmb();
3181 E1000_WRITE_REG(&adapter->hw, RDT, i); 3731 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
3182 } 3732 }
3183 3733
3184 if(unlikely(++i == rx_ring->count)) i = 0; 3734 if(unlikely(++i == rx_ring->count)) i = 0;
@@ -3194,9 +3744,9 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
3194 **/ 3744 **/
3195 3745
3196static void 3746static void
3197e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter) 3747e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
3748 struct e1000_rx_ring *rx_ring)
3198{ 3749{
3199 struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3200 struct net_device *netdev = adapter->netdev; 3750 struct net_device *netdev = adapter->netdev;
3201 struct pci_dev *pdev = adapter->pdev; 3751 struct pci_dev *pdev = adapter->pdev;
3202 union e1000_rx_desc_packet_split *rx_desc; 3752 union e1000_rx_desc_packet_split *rx_desc;
@@ -3215,22 +3765,26 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
3215 rx_desc = E1000_RX_DESC_PS(*rx_ring, i); 3765 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3216 3766
3217 for(j = 0; j < PS_PAGE_BUFFERS; j++) { 3767 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
3218 if(unlikely(!ps_page->ps_page[j])) { 3768 if (j < adapter->rx_ps_pages) {
3219 ps_page->ps_page[j] = 3769 if (likely(!ps_page->ps_page[j])) {
3220 alloc_page(GFP_ATOMIC); 3770 ps_page->ps_page[j] =
3221 if(unlikely(!ps_page->ps_page[j])) 3771 alloc_page(GFP_ATOMIC);
3222 goto no_buffers; 3772 if (unlikely(!ps_page->ps_page[j]))
3223 ps_page_dma->ps_page_dma[j] = 3773 goto no_buffers;
3224 pci_map_page(pdev, 3774 ps_page_dma->ps_page_dma[j] =
3225 ps_page->ps_page[j], 3775 pci_map_page(pdev,
3226 0, PAGE_SIZE, 3776 ps_page->ps_page[j],
3227 PCI_DMA_FROMDEVICE); 3777 0, PAGE_SIZE,
3228 } 3778 PCI_DMA_FROMDEVICE);
3229 /* Refresh the desc even if buffer_addrs didn't 3779 }
3230 * change because each write-back erases this info. 3780 /* Refresh the desc even if buffer_addrs didn't
3231 */ 3781 * change because each write-back erases
3232 rx_desc->read.buffer_addr[j+1] = 3782 * this info.
3233 cpu_to_le64(ps_page_dma->ps_page_dma[j]); 3783 */
3784 rx_desc->read.buffer_addr[j+1] =
3785 cpu_to_le64(ps_page_dma->ps_page_dma[j]);
3786 } else
3787 rx_desc->read.buffer_addr[j+1] = ~0;
3234 } 3788 }
3235 3789
3236 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN); 3790 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
@@ -3264,7 +3818,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
3264 * descriptors are 32 bytes...so we increment tail 3818 * descriptors are 32 bytes...so we increment tail
3265 * twice as much. 3819 * twice as much.
3266 */ 3820 */
3267 E1000_WRITE_REG(&adapter->hw, RDT, i<<1); 3821 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
3268 } 3822 }
3269 3823
3270 if(unlikely(++i == rx_ring->count)) i = 0; 3824 if(unlikely(++i == rx_ring->count)) i = 0;
@@ -3640,6 +4194,7 @@ e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
3640 return 0; 4194 return 0;
3641} 4195}
3642 4196
4197#ifdef CONFIG_PM
3643static int 4198static int
3644e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4199e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3645{ 4200{
@@ -3715,6 +4270,12 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3715 } 4270 }
3716 4271
3717 switch(adapter->hw.mac_type) { 4272 switch(adapter->hw.mac_type) {
4273 case e1000_82571:
4274 case e1000_82572:
4275 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
4276 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
4277 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
4278 break;
3718 case e1000_82573: 4279 case e1000_82573:
3719 swsm = E1000_READ_REG(&adapter->hw, SWSM); 4280 swsm = E1000_READ_REG(&adapter->hw, SWSM);
3720 E1000_WRITE_REG(&adapter->hw, SWSM, 4281 E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -3730,13 +4291,13 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3730 return 0; 4291 return 0;
3731} 4292}
3732 4293
3733#ifdef CONFIG_PM
3734static int 4294static int
3735e1000_resume(struct pci_dev *pdev) 4295e1000_resume(struct pci_dev *pdev)
3736{ 4296{
3737 struct net_device *netdev = pci_get_drvdata(pdev); 4297 struct net_device *netdev = pci_get_drvdata(pdev);
3738 struct e1000_adapter *adapter = netdev_priv(netdev); 4298 struct e1000_adapter *adapter = netdev_priv(netdev);
3739 uint32_t manc, ret_val, swsm; 4299 uint32_t manc, ret_val, swsm;
4300 uint32_t ctrl_ext;
3740 4301
3741 pci_set_power_state(pdev, PCI_D0); 4302 pci_set_power_state(pdev, PCI_D0);
3742 pci_restore_state(pdev); 4303 pci_restore_state(pdev);
@@ -3762,6 +4323,12 @@ e1000_resume(struct pci_dev *pdev)
3762 } 4323 }
3763 4324
3764 switch(adapter->hw.mac_type) { 4325 switch(adapter->hw.mac_type) {
4326 case e1000_82571:
4327 case e1000_82572:
4328 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
4329 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
4330 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
4331 break;
3765 case e1000_82573: 4332 case e1000_82573:
3766 swsm = E1000_READ_REG(&adapter->hw, SWSM); 4333 swsm = E1000_READ_REG(&adapter->hw, SWSM);
3767 E1000_WRITE_REG(&adapter->hw, SWSM, 4334 E1000_WRITE_REG(&adapter->hw, SWSM,
@@ -3786,7 +4353,7 @@ e1000_netpoll(struct net_device *netdev)
3786 struct e1000_adapter *adapter = netdev_priv(netdev); 4353 struct e1000_adapter *adapter = netdev_priv(netdev);
3787 disable_irq(adapter->pdev->irq); 4354 disable_irq(adapter->pdev->irq);
3788 e1000_intr(adapter->pdev->irq, netdev, NULL); 4355 e1000_intr(adapter->pdev->irq, netdev, NULL);
3789 e1000_clean_tx_irq(adapter); 4356 e1000_clean_tx_irq(adapter, adapter->tx_ring);
3790 enable_irq(adapter->pdev->irq); 4357 enable_irq(adapter->pdev->irq);
3791} 4358}
3792#endif 4359#endif
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 676247f9f1cc..38695d5b4637 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -306,7 +306,8 @@ e1000_check_options(struct e1000_adapter *adapter)
306 .def = E1000_DEFAULT_TXD, 306 .def = E1000_DEFAULT_TXD,
307 .arg = { .r = { .min = E1000_MIN_TXD }} 307 .arg = { .r = { .min = E1000_MIN_TXD }}
308 }; 308 };
309 struct e1000_desc_ring *tx_ring = &adapter->tx_ring; 309 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
310 int i;
310 e1000_mac_type mac_type = adapter->hw.mac_type; 311 e1000_mac_type mac_type = adapter->hw.mac_type;
311 opt.arg.r.max = mac_type < e1000_82544 ? 312 opt.arg.r.max = mac_type < e1000_82544 ?
312 E1000_MAX_TXD : E1000_MAX_82544_TXD; 313 E1000_MAX_TXD : E1000_MAX_82544_TXD;
@@ -319,6 +320,8 @@ e1000_check_options(struct e1000_adapter *adapter)
319 } else { 320 } else {
320 tx_ring->count = opt.def; 321 tx_ring->count = opt.def;
321 } 322 }
323 for (i = 0; i < adapter->num_queues; i++)
324 tx_ring[i].count = tx_ring->count;
322 } 325 }
323 { /* Receive Descriptor Count */ 326 { /* Receive Descriptor Count */
324 struct e1000_option opt = { 327 struct e1000_option opt = {
@@ -329,7 +332,8 @@ e1000_check_options(struct e1000_adapter *adapter)
329 .def = E1000_DEFAULT_RXD, 332 .def = E1000_DEFAULT_RXD,
330 .arg = { .r = { .min = E1000_MIN_RXD }} 333 .arg = { .r = { .min = E1000_MIN_RXD }}
331 }; 334 };
332 struct e1000_desc_ring *rx_ring = &adapter->rx_ring; 335 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
336 int i;
333 e1000_mac_type mac_type = adapter->hw.mac_type; 337 e1000_mac_type mac_type = adapter->hw.mac_type;
334 opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD : 338 opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
335 E1000_MAX_82544_RXD; 339 E1000_MAX_82544_RXD;
@@ -342,6 +346,8 @@ e1000_check_options(struct e1000_adapter *adapter)
342 } else { 346 } else {
343 rx_ring->count = opt.def; 347 rx_ring->count = opt.def;
344 } 348 }
349 for (i = 0; i < adapter->num_queues; i++)
350 rx_ring[i].count = rx_ring->count;
345 } 351 }
346 { /* Checksum Offload Enable/Disable */ 352 { /* Checksum Offload Enable/Disable */
347 struct e1000_option opt = { 353 struct e1000_option opt = {
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index dcb3028bb60f..a806dfe54d23 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -552,8 +552,7 @@ static int __init do_eepro_probe(struct net_device *dev)
552 { 552 {
553 unsigned short int WS[32]=WakeupSeq; 553 unsigned short int WS[32]=WakeupSeq;
554 554
555 if (check_region(WakeupPort, 2)==0) { 555 if (request_region(WakeupPort, 2, "eepro wakeup")) {
556
557 if (net_debug>5) 556 if (net_debug>5)
558 printk(KERN_DEBUG "Waking UP\n"); 557 printk(KERN_DEBUG "Waking UP\n");
559 558
@@ -563,7 +562,10 @@ static int __init do_eepro_probe(struct net_device *dev)
563 outb_p(WS[i],WakeupPort); 562 outb_p(WS[i],WakeupPort);
564 if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]); 563 if (net_debug>5) printk(KERN_DEBUG ": %#x ",WS[i]);
565 } 564 }
566 } else printk(KERN_WARNING "Checkregion Failed!\n"); 565
566 release_region(WakeupPort, 2);
567 } else
568 printk(KERN_WARNING "PnP wakeup region busy!\n");
567 } 569 }
568#endif 570#endif
569 571
@@ -705,7 +707,7 @@ static void __init eepro_print_info (struct net_device *dev)
705 dev->name, (unsigned)dev->base_addr); 707 dev->name, (unsigned)dev->base_addr);
706 break; 708 break;
707 case LAN595FX: 709 case LAN595FX:
708 printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,", 710 printk("%s: Intel EtherExpress Pro/10+ ISA\n at %#x,",
709 dev->name, (unsigned)dev->base_addr); 711 dev->name, (unsigned)dev->base_addr);
710 break; 712 break;
711 case LAN595TX: 713 case LAN595TX:
@@ -713,7 +715,7 @@ static void __init eepro_print_info (struct net_device *dev)
713 dev->name, (unsigned)dev->base_addr); 715 dev->name, (unsigned)dev->base_addr);
714 break; 716 break;
715 case LAN595: 717 case LAN595:
716 printk("%s: Intel 82595-based lan card at %#x,", 718 printk("%s: Intel 82595-based lan card at %#x,",
717 dev->name, (unsigned)dev->base_addr); 719 dev->name, (unsigned)dev->base_addr);
718 } 720 }
719 721
@@ -726,7 +728,7 @@ static void __init eepro_print_info (struct net_device *dev)
726 728
727 if (dev->irq > 2) 729 if (dev->irq > 2)
728 printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]); 730 printk(", IRQ %d, %s.\n", dev->irq, ifmap[dev->if_port]);
729 else 731 else
730 printk(", %s.\n", ifmap[dev->if_port]); 732 printk(", %s.\n", ifmap[dev->if_port]);
731 733
732 if (net_debug > 3) { 734 if (net_debug > 3) {
@@ -756,7 +758,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
756 int err; 758 int err;
757 759
758 /* Grab the region so we can find another board if autoIRQ fails. */ 760 /* Grab the region so we can find another board if autoIRQ fails. */
759 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { 761 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
760 if (!autoprobe) 762 if (!autoprobe)
761 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n", 763 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n",
762 ioaddr); 764 ioaddr);
@@ -838,15 +840,15 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
838 /* Mask off INT number */ 840 /* Mask off INT number */
839 int count = lp->word[1] & 7; 841 int count = lp->word[1] & 7;
840 unsigned irqMask = lp->word[7]; 842 unsigned irqMask = lp->word[7];
841 843
842 while (count--) 844 while (count--)
843 irqMask &= irqMask - 1; 845 irqMask &= irqMask - 1;
844 846
845 count = ffs(irqMask); 847 count = ffs(irqMask);
846 848
847 if (count) 849 if (count)
848 dev->irq = count - 1; 850 dev->irq = count - 1;
849 851
850 if (dev->irq < 2) { 852 if (dev->irq < 2) {
851 printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n"); 853 printk(KERN_ERR " Duh! illegal interrupt vector stored in EEPROM.\n");
852 goto exit; 854 goto exit;
@@ -854,7 +856,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
854 dev->irq = 9; 856 dev->irq = 9;
855 } 857 }
856 } 858 }
857 859
858 dev->open = eepro_open; 860 dev->open = eepro_open;
859 dev->stop = eepro_close; 861 dev->stop = eepro_close;
860 dev->hard_start_xmit = eepro_send_packet; 862 dev->hard_start_xmit = eepro_send_packet;
@@ -863,7 +865,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
863 dev->tx_timeout = eepro_tx_timeout; 865 dev->tx_timeout = eepro_tx_timeout;
864 dev->watchdog_timeo = TX_TIMEOUT; 866 dev->watchdog_timeo = TX_TIMEOUT;
865 dev->ethtool_ops = &eepro_ethtool_ops; 867 dev->ethtool_ops = &eepro_ethtool_ops;
866 868
867 /* print boot time info */ 869 /* print boot time info */
868 eepro_print_info(dev); 870 eepro_print_info(dev);
869 871
@@ -1047,8 +1049,8 @@ static int eepro_open(struct net_device *dev)
1047 1049
1048 1050
1049 /* Initialize the RCV and XMT upper and lower limits */ 1051 /* Initialize the RCV and XMT upper and lower limits */
1050 outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG); 1052 outb(lp->rcv_lower_limit >> 8, ioaddr + RCV_LOWER_LIMIT_REG);
1051 outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG); 1053 outb(lp->rcv_upper_limit >> 8, ioaddr + RCV_UPPER_LIMIT_REG);
1052 outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg); 1054 outb(lp->xmt_lower_limit >> 8, ioaddr + lp->xmt_lower_limit_reg);
1053 outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg); 1055 outb(lp->xmt_upper_limit >> 8, ioaddr + lp->xmt_upper_limit_reg);
1054 1056
@@ -1065,12 +1067,12 @@ static int eepro_open(struct net_device *dev)
1065 eepro_clear_int(ioaddr); 1067 eepro_clear_int(ioaddr);
1066 1068
1067 /* Initialize RCV */ 1069 /* Initialize RCV */
1068 outw(lp->rcv_lower_limit, ioaddr + RCV_BAR); 1070 outw(lp->rcv_lower_limit, ioaddr + RCV_BAR);
1069 lp->rx_start = lp->rcv_lower_limit; 1071 lp->rx_start = lp->rcv_lower_limit;
1070 outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP); 1072 outw(lp->rcv_upper_limit | 0xfe, ioaddr + RCV_STOP);
1071 1073
1072 /* Initialize XMT */ 1074 /* Initialize XMT */
1073 outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar); 1075 outw(lp->xmt_lower_limit, ioaddr + lp->xmt_bar);
1074 lp->tx_start = lp->tx_end = lp->xmt_lower_limit; 1076 lp->tx_start = lp->tx_end = lp->xmt_lower_limit;
1075 lp->tx_last = 0; 1077 lp->tx_last = 0;
1076 1078
@@ -1411,7 +1413,7 @@ set_multicast_list(struct net_device *dev)
1411 outb(0x08, ioaddr + STATUS_REG); 1413 outb(0x08, ioaddr + STATUS_REG);
1412 1414
1413 if (i & 0x20) { /* command ABORTed */ 1415 if (i & 0x20) { /* command ABORTed */
1414 printk(KERN_NOTICE "%s: multicast setup failed.\n", 1416 printk(KERN_NOTICE "%s: multicast setup failed.\n",
1415 dev->name); 1417 dev->name);
1416 break; 1418 break;
1417 } else if ((i & 0x0f) == 0x03) { /* MC-Done */ 1419 } else if ((i & 0x0f) == 0x03) { /* MC-Done */
@@ -1512,7 +1514,7 @@ hardware_send_packet(struct net_device *dev, void *buf, short length)
1512 end = last + (((length + 3) >> 1) << 1) + XMT_HEADER; 1514 end = last + (((length + 3) >> 1) << 1) + XMT_HEADER;
1513 1515
1514 if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */ 1516 if (end >= lp->xmt_upper_limit + 2) { /* the transmit buffer is wrapped around */
1515 if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) { 1517 if ((lp->xmt_upper_limit + 2 - last) <= XMT_HEADER) {
1516 /* Arrrr!!!, must keep the xmt header together, 1518 /* Arrrr!!!, must keep the xmt header together,
1517 several days were lost to chase this one down. */ 1519 several days were lost to chase this one down. */
1518 last = lp->xmt_lower_limit; 1520 last = lp->xmt_lower_limit;
@@ -1643,7 +1645,7 @@ eepro_rx(struct net_device *dev)
1643 else if (rcv_status & 0x0800) 1645 else if (rcv_status & 0x0800)
1644 lp->stats.rx_crc_errors++; 1646 lp->stats.rx_crc_errors++;
1645 1647
1646 printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n", 1648 printk(KERN_DEBUG "%s: event = %#x, status = %#x, next = %#x, size = %#x\n",
1647 dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size); 1649 dev->name, rcv_event, rcv_status, rcv_next_frame, rcv_size);
1648 } 1650 }
1649 1651
@@ -1674,10 +1676,10 @@ eepro_transmit_interrupt(struct net_device *dev)
1674{ 1676{
1675 struct eepro_local *lp = netdev_priv(dev); 1677 struct eepro_local *lp = netdev_priv(dev);
1676 short ioaddr = dev->base_addr; 1678 short ioaddr = dev->base_addr;
1677 short boguscount = 25; 1679 short boguscount = 25;
1678 short xmt_status; 1680 short xmt_status;
1679 1681
1680 while ((lp->tx_start != lp->tx_end) && boguscount--) { 1682 while ((lp->tx_start != lp->tx_end) && boguscount--) {
1681 1683
1682 outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG); 1684 outw(lp->tx_start, ioaddr + HOST_ADDRESS_REG);
1683 xmt_status = inw(ioaddr+IO_PORT); 1685 xmt_status = inw(ioaddr+IO_PORT);
@@ -1723,7 +1725,7 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
1723{ 1725{
1724 struct eepro_local *lp = (struct eepro_local *)dev->priv; 1726 struct eepro_local *lp = (struct eepro_local *)dev->priv;
1725 1727
1726 cmd->supported = SUPPORTED_10baseT_Half | 1728 cmd->supported = SUPPORTED_10baseT_Half |
1727 SUPPORTED_10baseT_Full | 1729 SUPPORTED_10baseT_Full |
1728 SUPPORTED_Autoneg; 1730 SUPPORTED_Autoneg;
1729 cmd->advertising = ADVERTISED_10baseT_Half | 1731 cmd->advertising = ADVERTISED_10baseT_Half |
@@ -1797,10 +1799,9 @@ MODULE_AUTHOR("Pascal Dupuis and others");
1797MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver"); 1799MODULE_DESCRIPTION("Intel i82595 ISA EtherExpressPro10/10+ driver");
1798MODULE_LICENSE("GPL"); 1800MODULE_LICENSE("GPL");
1799 1801
1800static int num_params; 1802module_param_array(io, int, NULL, 0);
1801module_param_array(io, int, &num_params, 0); 1803module_param_array(irq, int, NULL, 0);
1802module_param_array(irq, int, &num_params, 0); 1804module_param_array(mem, int, NULL, 0);
1803module_param_array(mem, int, &num_params, 0);
1804module_param(autodetect, int, 0); 1805module_param(autodetect, int, 0);
1805MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)"); 1806MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
1806MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)"); 1807MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 87f522738bfc..f119ec4e89ea 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1334,7 +1334,7 @@ static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1334static int epic_poll(struct net_device *dev, int *budget) 1334static int epic_poll(struct net_device *dev, int *budget)
1335{ 1335{
1336 struct epic_private *ep = dev->priv; 1336 struct epic_private *ep = dev->priv;
1337 int work_done, orig_budget; 1337 int work_done = 0, orig_budget;
1338 long ioaddr = dev->base_addr; 1338 long ioaddr = dev->base_addr;
1339 1339
1340 orig_budget = (*budget > dev->quota) ? dev->quota : *budget; 1340 orig_budget = (*budget > dev->quota) ? dev->quota : *budget;
@@ -1343,7 +1343,7 @@ rx_action:
1343 1343
1344 epic_tx(dev, ep); 1344 epic_tx(dev, ep);
1345 1345
1346 work_done = epic_rx(dev, *budget); 1346 work_done += epic_rx(dev, *budget);
1347 1347
1348 epic_rx_err(dev, ep); 1348 epic_rx_err(dev, ep);
1349 1349
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index d6eefdb71c17..22aec6ed80f5 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -95,6 +95,8 @@
95 * of nv_remove 95 * of nv_remove
96 * 0.42: 06 Aug 2005: Fix lack of link speed initialization 96 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
97 * in the second (and later) nv_open call 97 * in the second (and later) nv_open call
98 * 0.43: 10 Aug 2005: Add support for tx checksum.
99 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
98 * 100 *
99 * Known bugs: 101 * Known bugs:
100 * We suspect that on some hardware no TX done interrupts are generated. 102 * We suspect that on some hardware no TX done interrupts are generated.
@@ -106,7 +108,7 @@
106 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 108 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
107 * superfluous timer interrupts from the nic. 109 * superfluous timer interrupts from the nic.
108 */ 110 */
109#define FORCEDETH_VERSION "0.41" 111#define FORCEDETH_VERSION "0.44"
110#define DRV_NAME "forcedeth" 112#define DRV_NAME "forcedeth"
111 113
112#include <linux/module.h> 114#include <linux/module.h>
@@ -145,6 +147,7 @@
145#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */ 147#define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */
146#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ 148#define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */
147#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ 149#define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */
150#define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */
148 151
149enum { 152enum {
150 NvRegIrqStatus = 0x000, 153 NvRegIrqStatus = 0x000,
@@ -241,6 +244,9 @@ enum {
241#define NVREG_TXRXCTL_IDLE 0x0008 244#define NVREG_TXRXCTL_IDLE 0x0008
242#define NVREG_TXRXCTL_RESET 0x0010 245#define NVREG_TXRXCTL_RESET 0x0010
243#define NVREG_TXRXCTL_RXCHECK 0x0400 246#define NVREG_TXRXCTL_RXCHECK 0x0400
247#define NVREG_TXRXCTL_DESC_1 0
248#define NVREG_TXRXCTL_DESC_2 0x02100
249#define NVREG_TXRXCTL_DESC_3 0x02200
244 NvRegMIIStatus = 0x180, 250 NvRegMIIStatus = 0x180,
245#define NVREG_MIISTAT_ERROR 0x0001 251#define NVREG_MIISTAT_ERROR 0x0001
246#define NVREG_MIISTAT_LINKCHANGE 0x0008 252#define NVREG_MIISTAT_LINKCHANGE 0x0008
@@ -335,6 +341,10 @@ typedef union _ring_type {
335/* error and valid are the same for both */ 341/* error and valid are the same for both */
336#define NV_TX2_ERROR (1<<30) 342#define NV_TX2_ERROR (1<<30)
337#define NV_TX2_VALID (1<<31) 343#define NV_TX2_VALID (1<<31)
344#define NV_TX2_TSO (1<<28)
345#define NV_TX2_TSO_SHIFT 14
346#define NV_TX2_CHECKSUM_L3 (1<<27)
347#define NV_TX2_CHECKSUM_L4 (1<<26)
338 348
339#define NV_RX_DESCRIPTORVALID (1<<16) 349#define NV_RX_DESCRIPTORVALID (1<<16)
340#define NV_RX_MISSEDFRAME (1<<17) 350#define NV_RX_MISSEDFRAME (1<<17)
@@ -417,14 +427,14 @@ typedef union _ring_type {
417 427
418/* 428/*
419 * desc_ver values: 429 * desc_ver values:
420 * This field has two purposes: 430 * The nic supports three different descriptor types:
421 * - Newer nics uses a different ring layout. The layout is selected by 431 * - DESC_VER_1: Original
422 * comparing np->desc_ver with DESC_VER_xy. 432 * - DESC_VER_2: support for jumbo frames.
423 * - It contains bits that are forced on when writing to NvRegTxRxControl. 433 * - DESC_VER_3: 64-bit format.
424 */ 434 */
425#define DESC_VER_1 0x0 435#define DESC_VER_1 1
426#define DESC_VER_2 (0x02100|NVREG_TXRXCTL_RXCHECK) 436#define DESC_VER_2 2
427#define DESC_VER_3 (0x02200|NVREG_TXRXCTL_RXCHECK) 437#define DESC_VER_3 3
428 438
429/* PHY defines */ 439/* PHY defines */
430#define PHY_OUI_MARVELL 0x5043 440#define PHY_OUI_MARVELL 0x5043
@@ -491,6 +501,7 @@ struct fe_priv {
491 u32 orig_mac[2]; 501 u32 orig_mac[2];
492 u32 irqmask; 502 u32 irqmask;
493 u32 desc_ver; 503 u32 desc_ver;
504 u32 txrxctl_bits;
494 505
495 void __iomem *base; 506 void __iomem *base;
496 507
@@ -534,7 +545,7 @@ static inline struct fe_priv *get_nvpriv(struct net_device *dev)
534 545
535static inline u8 __iomem *get_hwbase(struct net_device *dev) 546static inline u8 __iomem *get_hwbase(struct net_device *dev)
536{ 547{
537 return get_nvpriv(dev)->base; 548 return ((struct fe_priv *)netdev_priv(dev))->base;
538} 549}
539 550
540static inline void pci_push(u8 __iomem *base) 551static inline void pci_push(u8 __iomem *base)
@@ -623,7 +634,7 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
623 634
624static int phy_reset(struct net_device *dev) 635static int phy_reset(struct net_device *dev)
625{ 636{
626 struct fe_priv *np = get_nvpriv(dev); 637 struct fe_priv *np = netdev_priv(dev);
627 u32 miicontrol; 638 u32 miicontrol;
628 unsigned int tries = 0; 639 unsigned int tries = 0;
629 640
@@ -726,7 +737,7 @@ static int phy_init(struct net_device *dev)
726 737
727static void nv_start_rx(struct net_device *dev) 738static void nv_start_rx(struct net_device *dev)
728{ 739{
729 struct fe_priv *np = get_nvpriv(dev); 740 struct fe_priv *np = netdev_priv(dev);
730 u8 __iomem *base = get_hwbase(dev); 741 u8 __iomem *base = get_hwbase(dev);
731 742
732 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); 743 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
@@ -782,14 +793,14 @@ static void nv_stop_tx(struct net_device *dev)
782 793
783static void nv_txrx_reset(struct net_device *dev) 794static void nv_txrx_reset(struct net_device *dev)
784{ 795{
785 struct fe_priv *np = get_nvpriv(dev); 796 struct fe_priv *np = netdev_priv(dev);
786 u8 __iomem *base = get_hwbase(dev); 797 u8 __iomem *base = get_hwbase(dev);
787 798
788 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name); 799 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
789 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->desc_ver, base + NvRegTxRxControl); 800 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
790 pci_push(base); 801 pci_push(base);
791 udelay(NV_TXRX_RESET_DELAY); 802 udelay(NV_TXRX_RESET_DELAY);
792 writel(NVREG_TXRXCTL_BIT2 | np->desc_ver, base + NvRegTxRxControl); 803 writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
793 pci_push(base); 804 pci_push(base);
794} 805}
795 806
@@ -801,7 +812,7 @@ static void nv_txrx_reset(struct net_device *dev)
801 */ 812 */
802static struct net_device_stats *nv_get_stats(struct net_device *dev) 813static struct net_device_stats *nv_get_stats(struct net_device *dev)
803{ 814{
804 struct fe_priv *np = get_nvpriv(dev); 815 struct fe_priv *np = netdev_priv(dev);
805 816
806 /* It seems that the nic always generates interrupts and doesn't 817 /* It seems that the nic always generates interrupts and doesn't
807 * accumulate errors internally. Thus the current values in np->stats 818 * accumulate errors internally. Thus the current values in np->stats
@@ -817,7 +828,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
817 */ 828 */
818static int nv_alloc_rx(struct net_device *dev) 829static int nv_alloc_rx(struct net_device *dev)
819{ 830{
820 struct fe_priv *np = get_nvpriv(dev); 831 struct fe_priv *np = netdev_priv(dev);
821 unsigned int refill_rx = np->refill_rx; 832 unsigned int refill_rx = np->refill_rx;
822 int nr; 833 int nr;
823 834
@@ -861,7 +872,7 @@ static int nv_alloc_rx(struct net_device *dev)
861static void nv_do_rx_refill(unsigned long data) 872static void nv_do_rx_refill(unsigned long data)
862{ 873{
863 struct net_device *dev = (struct net_device *) data; 874 struct net_device *dev = (struct net_device *) data;
864 struct fe_priv *np = get_nvpriv(dev); 875 struct fe_priv *np = netdev_priv(dev);
865 876
866 disable_irq(dev->irq); 877 disable_irq(dev->irq);
867 if (nv_alloc_rx(dev)) { 878 if (nv_alloc_rx(dev)) {
@@ -875,7 +886,7 @@ static void nv_do_rx_refill(unsigned long data)
875 886
876static void nv_init_rx(struct net_device *dev) 887static void nv_init_rx(struct net_device *dev)
877{ 888{
878 struct fe_priv *np = get_nvpriv(dev); 889 struct fe_priv *np = netdev_priv(dev);
879 int i; 890 int i;
880 891
881 np->cur_rx = RX_RING; 892 np->cur_rx = RX_RING;
@@ -889,15 +900,17 @@ static void nv_init_rx(struct net_device *dev)
889 900
890static void nv_init_tx(struct net_device *dev) 901static void nv_init_tx(struct net_device *dev)
891{ 902{
892 struct fe_priv *np = get_nvpriv(dev); 903 struct fe_priv *np = netdev_priv(dev);
893 int i; 904 int i;
894 905
895 np->next_tx = np->nic_tx = 0; 906 np->next_tx = np->nic_tx = 0;
896 for (i = 0; i < TX_RING; i++) 907 for (i = 0; i < TX_RING; i++) {
897 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 908 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
898 np->tx_ring.orig[i].FlagLen = 0; 909 np->tx_ring.orig[i].FlagLen = 0;
899 else 910 else
900 np->tx_ring.ex[i].FlagLen = 0; 911 np->tx_ring.ex[i].FlagLen = 0;
912 np->tx_skbuff[i] = NULL;
913 }
901} 914}
902 915
903static int nv_init_ring(struct net_device *dev) 916static int nv_init_ring(struct net_device *dev)
@@ -907,21 +920,44 @@ static int nv_init_ring(struct net_device *dev)
907 return nv_alloc_rx(dev); 920 return nv_alloc_rx(dev);
908} 921}
909 922
923static void nv_release_txskb(struct net_device *dev, unsigned int skbnr)
924{
925 struct fe_priv *np = netdev_priv(dev);
926 struct sk_buff *skb = np->tx_skbuff[skbnr];
927 unsigned int j, entry, fragments;
928
929 dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d, skb %p\n",
930 dev->name, skbnr, np->tx_skbuff[skbnr]);
931
932 entry = skbnr;
933 if ((fragments = skb_shinfo(skb)->nr_frags) != 0) {
934 for (j = fragments; j >= 1; j--) {
935 skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1];
936 pci_unmap_page(np->pci_dev, np->tx_dma[entry],
937 frag->size,
938 PCI_DMA_TODEVICE);
939 entry = (entry - 1) % TX_RING;
940 }
941 }
942 pci_unmap_single(np->pci_dev, np->tx_dma[entry],
943 skb->len - skb->data_len,
944 PCI_DMA_TODEVICE);
945 dev_kfree_skb_irq(skb);
946 np->tx_skbuff[skbnr] = NULL;
947}
948
910static void nv_drain_tx(struct net_device *dev) 949static void nv_drain_tx(struct net_device *dev)
911{ 950{
912 struct fe_priv *np = get_nvpriv(dev); 951 struct fe_priv *np = netdev_priv(dev);
913 int i; 952 unsigned int i;
953
914 for (i = 0; i < TX_RING; i++) { 954 for (i = 0; i < TX_RING; i++) {
915 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 955 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
916 np->tx_ring.orig[i].FlagLen = 0; 956 np->tx_ring.orig[i].FlagLen = 0;
917 else 957 else
918 np->tx_ring.ex[i].FlagLen = 0; 958 np->tx_ring.ex[i].FlagLen = 0;
919 if (np->tx_skbuff[i]) { 959 if (np->tx_skbuff[i]) {
920 pci_unmap_single(np->pci_dev, np->tx_dma[i], 960 nv_release_txskb(dev, i);
921 np->tx_skbuff[i]->len,
922 PCI_DMA_TODEVICE);
923 dev_kfree_skb(np->tx_skbuff[i]);
924 np->tx_skbuff[i] = NULL;
925 np->stats.tx_dropped++; 961 np->stats.tx_dropped++;
926 } 962 }
927 } 963 }
@@ -929,7 +965,7 @@ static void nv_drain_tx(struct net_device *dev)
929 965
930static void nv_drain_rx(struct net_device *dev) 966static void nv_drain_rx(struct net_device *dev)
931{ 967{
932 struct fe_priv *np = get_nvpriv(dev); 968 struct fe_priv *np = netdev_priv(dev);
933 int i; 969 int i;
934 for (i = 0; i < RX_RING; i++) { 970 for (i = 0; i < RX_RING; i++) {
935 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 971 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
@@ -959,28 +995,69 @@ static void drain_ring(struct net_device *dev)
959 */ 995 */
960static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 996static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
961{ 997{
962 struct fe_priv *np = get_nvpriv(dev); 998 struct fe_priv *np = netdev_priv(dev);
963 int nr = np->next_tx % TX_RING; 999 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1000 unsigned int fragments = skb_shinfo(skb)->nr_frags;
1001 unsigned int nr = (np->next_tx + fragments) % TX_RING;
1002 unsigned int i;
1003
1004 spin_lock_irq(&np->lock);
1005
1006 if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) {
1007 spin_unlock_irq(&np->lock);
1008 netif_stop_queue(dev);
1009 return NETDEV_TX_BUSY;
1010 }
964 1011
965 np->tx_skbuff[nr] = skb; 1012 np->tx_skbuff[nr] = skb;
966 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len, 1013
967 PCI_DMA_TODEVICE); 1014 if (fragments) {
1015 dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments);
1016 /* setup descriptors in reverse order */
1017 for (i = fragments; i >= 1; i--) {
1018 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
1019 np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size,
1020 PCI_DMA_TODEVICE);
968 1021
969 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1022 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1023 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1024 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
1025 } else {
1026 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1027 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1028 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
1029 }
1030
1031 nr = (nr - 1) % TX_RING;
1032
1033 if (np->desc_ver == DESC_VER_1)
1034 tx_flags_extra &= ~NV_TX_LASTPACKET;
1035 else
1036 tx_flags_extra &= ~NV_TX2_LASTPACKET;
1037 }
1038 }
1039
1040#ifdef NETIF_F_TSO
1041 if (skb_shinfo(skb)->tso_size)
1042 tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
1043 else
1044#endif
1045 tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
1046
1047 np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len,
1048 PCI_DMA_TODEVICE);
1049
1050 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
970 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1051 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
971 else { 1052 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
1053 } else {
972 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1054 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
973 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1055 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
974 } 1056 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
1057 }
975 1058
976 spin_lock_irq(&np->lock); 1059 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission. tx_flags_extra: %x\n",
977 wmb(); 1060 dev->name, np->next_tx, tx_flags_extra);
978 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
979 np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
980 else
981 np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
982 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
983 dev->name, np->next_tx);
984 { 1061 {
985 int j; 1062 int j;
986 for (j=0; j<64; j++) { 1063 for (j=0; j<64; j++) {
@@ -991,15 +1068,13 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
991 dprintk("\n"); 1068 dprintk("\n");
992 } 1069 }
993 1070
994 np->next_tx++; 1071 np->next_tx += 1 + fragments;
995 1072
996 dev->trans_start = jiffies; 1073 dev->trans_start = jiffies;
997 if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP)
998 netif_stop_queue(dev);
999 spin_unlock_irq(&np->lock); 1074 spin_unlock_irq(&np->lock);
1000 writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl); 1075 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1001 pci_push(get_hwbase(dev)); 1076 pci_push(get_hwbase(dev));
1002 return 0; 1077 return NETDEV_TX_OK;
1003} 1078}
1004 1079
1005/* 1080/*
@@ -1009,9 +1084,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1009 */ 1084 */
1010static void nv_tx_done(struct net_device *dev) 1085static void nv_tx_done(struct net_device *dev)
1011{ 1086{
1012 struct fe_priv *np = get_nvpriv(dev); 1087 struct fe_priv *np = netdev_priv(dev);
1013 u32 Flags; 1088 u32 Flags;
1014 int i; 1089 unsigned int i;
1090 struct sk_buff *skb;
1015 1091
1016 while (np->nic_tx != np->next_tx) { 1092 while (np->nic_tx != np->next_tx) {
1017 i = np->nic_tx % TX_RING; 1093 i = np->nic_tx % TX_RING;
@@ -1026,35 +1102,38 @@ static void nv_tx_done(struct net_device *dev)
1026 if (Flags & NV_TX_VALID) 1102 if (Flags & NV_TX_VALID)
1027 break; 1103 break;
1028 if (np->desc_ver == DESC_VER_1) { 1104 if (np->desc_ver == DESC_VER_1) {
1029 if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| 1105 if (Flags & NV_TX_LASTPACKET) {
1030 NV_TX_UNDERFLOW|NV_TX_ERROR)) { 1106 skb = np->tx_skbuff[i];
1031 if (Flags & NV_TX_UNDERFLOW) 1107 if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1032 np->stats.tx_fifo_errors++; 1108 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1033 if (Flags & NV_TX_CARRIERLOST) 1109 if (Flags & NV_TX_UNDERFLOW)
1034 np->stats.tx_carrier_errors++; 1110 np->stats.tx_fifo_errors++;
1035 np->stats.tx_errors++; 1111 if (Flags & NV_TX_CARRIERLOST)
1036 } else { 1112 np->stats.tx_carrier_errors++;
1037 np->stats.tx_packets++; 1113 np->stats.tx_errors++;
1038 np->stats.tx_bytes += np->tx_skbuff[i]->len; 1114 } else {
1115 np->stats.tx_packets++;
1116 np->stats.tx_bytes += skb->len;
1117 }
1118 nv_release_txskb(dev, i);
1039 } 1119 }
1040 } else { 1120 } else {
1041 if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| 1121 if (Flags & NV_TX2_LASTPACKET) {
1042 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { 1122 skb = np->tx_skbuff[i];
1043 if (Flags & NV_TX2_UNDERFLOW) 1123 if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1044 np->stats.tx_fifo_errors++; 1124 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1045 if (Flags & NV_TX2_CARRIERLOST) 1125 if (Flags & NV_TX2_UNDERFLOW)
1046 np->stats.tx_carrier_errors++; 1126 np->stats.tx_fifo_errors++;
1047 np->stats.tx_errors++; 1127 if (Flags & NV_TX2_CARRIERLOST)
1048 } else { 1128 np->stats.tx_carrier_errors++;
1049 np->stats.tx_packets++; 1129 np->stats.tx_errors++;
1050 np->stats.tx_bytes += np->tx_skbuff[i]->len; 1130 } else {
1131 np->stats.tx_packets++;
1132 np->stats.tx_bytes += skb->len;
1133 }
1134 nv_release_txskb(dev, i);
1051 } 1135 }
1052 } 1136 }
1053 pci_unmap_single(np->pci_dev, np->tx_dma[i],
1054 np->tx_skbuff[i]->len,
1055 PCI_DMA_TODEVICE);
1056 dev_kfree_skb_irq(np->tx_skbuff[i]);
1057 np->tx_skbuff[i] = NULL;
1058 np->nic_tx++; 1137 np->nic_tx++;
1059 } 1138 }
1060 if (np->next_tx - np->nic_tx < TX_LIMIT_START) 1139 if (np->next_tx - np->nic_tx < TX_LIMIT_START)
@@ -1067,7 +1146,7 @@ static void nv_tx_done(struct net_device *dev)
1067 */ 1146 */
1068static void nv_tx_timeout(struct net_device *dev) 1147static void nv_tx_timeout(struct net_device *dev)
1069{ 1148{
1070 struct fe_priv *np = get_nvpriv(dev); 1149 struct fe_priv *np = netdev_priv(dev);
1071 u8 __iomem *base = get_hwbase(dev); 1150 u8 __iomem *base = get_hwbase(dev);
1072 1151
1073 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, 1152 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
@@ -1200,7 +1279,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
1200 1279
1201static void nv_rx_process(struct net_device *dev) 1280static void nv_rx_process(struct net_device *dev)
1202{ 1281{
1203 struct fe_priv *np = get_nvpriv(dev); 1282 struct fe_priv *np = netdev_priv(dev);
1204 u32 Flags; 1283 u32 Flags;
1205 1284
1206 for (;;) { 1285 for (;;) {
@@ -1355,7 +1434,7 @@ static void set_bufsize(struct net_device *dev)
1355 */ 1434 */
1356static int nv_change_mtu(struct net_device *dev, int new_mtu) 1435static int nv_change_mtu(struct net_device *dev, int new_mtu)
1357{ 1436{
1358 struct fe_priv *np = get_nvpriv(dev); 1437 struct fe_priv *np = netdev_priv(dev);
1359 int old_mtu; 1438 int old_mtu;
1360 1439
1361 if (new_mtu < 64 || new_mtu > np->pkt_limit) 1440 if (new_mtu < 64 || new_mtu > np->pkt_limit)
@@ -1408,7 +1487,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
1408 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT), 1487 writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
1409 base + NvRegRingSizes); 1488 base + NvRegRingSizes);
1410 pci_push(base); 1489 pci_push(base);
1411 writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl); 1490 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
1412 pci_push(base); 1491 pci_push(base);
1413 1492
1414 /* restart rx engine */ 1493 /* restart rx engine */
@@ -1440,7 +1519,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
1440 */ 1519 */
1441static int nv_set_mac_address(struct net_device *dev, void *addr) 1520static int nv_set_mac_address(struct net_device *dev, void *addr)
1442{ 1521{
1443 struct fe_priv *np = get_nvpriv(dev); 1522 struct fe_priv *np = netdev_priv(dev);
1444 struct sockaddr *macaddr = (struct sockaddr*)addr; 1523 struct sockaddr *macaddr = (struct sockaddr*)addr;
1445 1524
1446 if(!is_valid_ether_addr(macaddr->sa_data)) 1525 if(!is_valid_ether_addr(macaddr->sa_data))
@@ -1475,7 +1554,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
1475 */ 1554 */
1476static void nv_set_multicast(struct net_device *dev) 1555static void nv_set_multicast(struct net_device *dev)
1477{ 1556{
1478 struct fe_priv *np = get_nvpriv(dev); 1557 struct fe_priv *np = netdev_priv(dev);
1479 u8 __iomem *base = get_hwbase(dev); 1558 u8 __iomem *base = get_hwbase(dev);
1480 u32 addr[2]; 1559 u32 addr[2];
1481 u32 mask[2]; 1560 u32 mask[2];
@@ -1535,7 +1614,7 @@ static void nv_set_multicast(struct net_device *dev)
1535 1614
1536static int nv_update_linkspeed(struct net_device *dev) 1615static int nv_update_linkspeed(struct net_device *dev)
1537{ 1616{
1538 struct fe_priv *np = get_nvpriv(dev); 1617 struct fe_priv *np = netdev_priv(dev);
1539 u8 __iomem *base = get_hwbase(dev); 1618 u8 __iomem *base = get_hwbase(dev);
1540 int adv, lpa; 1619 int adv, lpa;
1541 int newls = np->linkspeed; 1620 int newls = np->linkspeed;
@@ -1705,7 +1784,7 @@ static void nv_link_irq(struct net_device *dev)
1705static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) 1784static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1706{ 1785{
1707 struct net_device *dev = (struct net_device *) data; 1786 struct net_device *dev = (struct net_device *) data;
1708 struct fe_priv *np = get_nvpriv(dev); 1787 struct fe_priv *np = netdev_priv(dev);
1709 u8 __iomem *base = get_hwbase(dev); 1788 u8 __iomem *base = get_hwbase(dev);
1710 u32 events; 1789 u32 events;
1711 int i; 1790 int i;
@@ -1777,7 +1856,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
1777static void nv_do_nic_poll(unsigned long data) 1856static void nv_do_nic_poll(unsigned long data)
1778{ 1857{
1779 struct net_device *dev = (struct net_device *) data; 1858 struct net_device *dev = (struct net_device *) data;
1780 struct fe_priv *np = get_nvpriv(dev); 1859 struct fe_priv *np = netdev_priv(dev);
1781 u8 __iomem *base = get_hwbase(dev); 1860 u8 __iomem *base = get_hwbase(dev);
1782 1861
1783 disable_irq(dev->irq); 1862 disable_irq(dev->irq);
@@ -1801,7 +1880,7 @@ static void nv_poll_controller(struct net_device *dev)
1801 1880
1802static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1881static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1803{ 1882{
1804 struct fe_priv *np = get_nvpriv(dev); 1883 struct fe_priv *np = netdev_priv(dev);
1805 strcpy(info->driver, "forcedeth"); 1884 strcpy(info->driver, "forcedeth");
1806 strcpy(info->version, FORCEDETH_VERSION); 1885 strcpy(info->version, FORCEDETH_VERSION);
1807 strcpy(info->bus_info, pci_name(np->pci_dev)); 1886 strcpy(info->bus_info, pci_name(np->pci_dev));
@@ -1809,7 +1888,7 @@ static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1809 1888
1810static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 1889static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
1811{ 1890{
1812 struct fe_priv *np = get_nvpriv(dev); 1891 struct fe_priv *np = netdev_priv(dev);
1813 wolinfo->supported = WAKE_MAGIC; 1892 wolinfo->supported = WAKE_MAGIC;
1814 1893
1815 spin_lock_irq(&np->lock); 1894 spin_lock_irq(&np->lock);
@@ -1820,7 +1899,7 @@ static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
1820 1899
1821static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 1900static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
1822{ 1901{
1823 struct fe_priv *np = get_nvpriv(dev); 1902 struct fe_priv *np = netdev_priv(dev);
1824 u8 __iomem *base = get_hwbase(dev); 1903 u8 __iomem *base = get_hwbase(dev);
1825 1904
1826 spin_lock_irq(&np->lock); 1905 spin_lock_irq(&np->lock);
@@ -2021,7 +2100,7 @@ static int nv_get_regs_len(struct net_device *dev)
2021 2100
2022static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 2101static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2023{ 2102{
2024 struct fe_priv *np = get_nvpriv(dev); 2103 struct fe_priv *np = netdev_priv(dev);
2025 u8 __iomem *base = get_hwbase(dev); 2104 u8 __iomem *base = get_hwbase(dev);
2026 u32 *rbuf = buf; 2105 u32 *rbuf = buf;
2027 int i; 2106 int i;
@@ -2035,7 +2114,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
2035 2114
2036static int nv_nway_reset(struct net_device *dev) 2115static int nv_nway_reset(struct net_device *dev)
2037{ 2116{
2038 struct fe_priv *np = get_nvpriv(dev); 2117 struct fe_priv *np = netdev_priv(dev);
2039 int ret; 2118 int ret;
2040 2119
2041 spin_lock_irq(&np->lock); 2120 spin_lock_irq(&np->lock);
@@ -2065,11 +2144,12 @@ static struct ethtool_ops ops = {
2065 .get_regs_len = nv_get_regs_len, 2144 .get_regs_len = nv_get_regs_len,
2066 .get_regs = nv_get_regs, 2145 .get_regs = nv_get_regs,
2067 .nway_reset = nv_nway_reset, 2146 .nway_reset = nv_nway_reset,
2147 .get_perm_addr = ethtool_op_get_perm_addr,
2068}; 2148};
2069 2149
2070static int nv_open(struct net_device *dev) 2150static int nv_open(struct net_device *dev)
2071{ 2151{
2072 struct fe_priv *np = get_nvpriv(dev); 2152 struct fe_priv *np = netdev_priv(dev);
2073 u8 __iomem *base = get_hwbase(dev); 2153 u8 __iomem *base = get_hwbase(dev);
2074 int ret, oom, i; 2154 int ret, oom, i;
2075 2155
@@ -2114,9 +2194,9 @@ static int nv_open(struct net_device *dev)
2114 /* 5) continue setup */ 2194 /* 5) continue setup */
2115 writel(np->linkspeed, base + NvRegLinkSpeed); 2195 writel(np->linkspeed, base + NvRegLinkSpeed);
2116 writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3); 2196 writel(NVREG_UNKSETUP3_VAL1, base + NvRegUnknownSetupReg3);
2117 writel(np->desc_ver, base + NvRegTxRxControl); 2197 writel(np->txrxctl_bits, base + NvRegTxRxControl);
2118 pci_push(base); 2198 pci_push(base);
2119 writel(NVREG_TXRXCTL_BIT1|np->desc_ver, base + NvRegTxRxControl); 2199 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
2120 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 2200 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
2121 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 2201 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
2122 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 2202 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
@@ -2205,7 +2285,7 @@ out_drain:
2205 2285
2206static int nv_close(struct net_device *dev) 2286static int nv_close(struct net_device *dev)
2207{ 2287{
2208 struct fe_priv *np = get_nvpriv(dev); 2288 struct fe_priv *np = netdev_priv(dev);
2209 u8 __iomem *base; 2289 u8 __iomem *base;
2210 2290
2211 spin_lock_irq(&np->lock); 2291 spin_lock_irq(&np->lock);
@@ -2261,7 +2341,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2261 if (!dev) 2341 if (!dev)
2262 goto out; 2342 goto out;
2263 2343
2264 np = get_nvpriv(dev); 2344 np = netdev_priv(dev);
2265 np->pci_dev = pci_dev; 2345 np->pci_dev = pci_dev;
2266 spin_lock_init(&np->lock); 2346 spin_lock_init(&np->lock);
2267 SET_MODULE_OWNER(dev); 2347 SET_MODULE_OWNER(dev);
@@ -2313,19 +2393,32 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2313 if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) { 2393 if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
2314 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 2394 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
2315 pci_name(pci_dev)); 2395 pci_name(pci_dev));
2396 } else {
2397 dev->features |= NETIF_F_HIGHDMA;
2316 } 2398 }
2399 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
2317 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 2400 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
2318 /* packet format 2: supports jumbo frames */ 2401 /* packet format 2: supports jumbo frames */
2319 np->desc_ver = DESC_VER_2; 2402 np->desc_ver = DESC_VER_2;
2403 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
2320 } else { 2404 } else {
2321 /* original packet format */ 2405 /* original packet format */
2322 np->desc_ver = DESC_VER_1; 2406 np->desc_ver = DESC_VER_1;
2407 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
2323 } 2408 }
2324 2409
2325 np->pkt_limit = NV_PKTLIMIT_1; 2410 np->pkt_limit = NV_PKTLIMIT_1;
2326 if (id->driver_data & DEV_HAS_LARGEDESC) 2411 if (id->driver_data & DEV_HAS_LARGEDESC)
2327 np->pkt_limit = NV_PKTLIMIT_2; 2412 np->pkt_limit = NV_PKTLIMIT_2;
2328 2413
2414 if (id->driver_data & DEV_HAS_CHECKSUM) {
2415 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
2416 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2417#ifdef NETIF_F_TSO
2418 dev->features |= NETIF_F_TSO;
2419#endif
2420 }
2421
2329 err = -ENOMEM; 2422 err = -ENOMEM;
2330 np->base = ioremap(addr, NV_PCI_REGSZ); 2423 np->base = ioremap(addr, NV_PCI_REGSZ);
2331 if (!np->base) 2424 if (!np->base)
@@ -2377,8 +2470,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2377 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 2470 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
2378 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 2471 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
2379 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 2472 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
2473 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
2380 2474
2381 if (!is_valid_ether_addr(dev->dev_addr)) { 2475 if (!is_valid_ether_addr(dev->perm_addr)) {
2382 /* 2476 /*
2383 * Bad mac address. At least one bios sets the mac address 2477 * Bad mac address. At least one bios sets the mac address
2384 * to 01:23:45:67:89:ab 2478 * to 01:23:45:67:89:ab
@@ -2403,9 +2497,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
2403 np->wolenabled = 0; 2497 np->wolenabled = 0;
2404 2498
2405 if (np->desc_ver == DESC_VER_1) { 2499 if (np->desc_ver == DESC_VER_1) {
2406 np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID; 2500 np->tx_flags = NV_TX_VALID;
2407 } else { 2501 } else {
2408 np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID; 2502 np->tx_flags = NV_TX2_VALID;
2409 } 2503 }
2410 np->irqmask = NVREG_IRQMASK_WANTED; 2504 np->irqmask = NVREG_IRQMASK_WANTED;
2411 if (id->driver_data & DEV_NEED_TIMERIRQ) 2505 if (id->driver_data & DEV_NEED_TIMERIRQ)
@@ -2494,7 +2588,7 @@ out:
2494static void __devexit nv_remove(struct pci_dev *pci_dev) 2588static void __devexit nv_remove(struct pci_dev *pci_dev)
2495{ 2589{
2496 struct net_device *dev = pci_get_drvdata(pci_dev); 2590 struct net_device *dev = pci_get_drvdata(pci_dev);
2497 struct fe_priv *np = get_nvpriv(dev); 2591 struct fe_priv *np = netdev_priv(dev);
2498 2592
2499 unregister_netdev(dev); 2593 unregister_netdev(dev);
2500 2594
@@ -2525,35 +2619,35 @@ static struct pci_device_id pci_tbl[] = {
2525 }, 2619 },
2526 { /* nForce3 Ethernet Controller */ 2620 { /* nForce3 Ethernet Controller */
2527 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4), 2621 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
2528 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC, 2622 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
2529 }, 2623 },
2530 { /* nForce3 Ethernet Controller */ 2624 { /* nForce3 Ethernet Controller */
2531 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5), 2625 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
2532 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC, 2626 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
2533 }, 2627 },
2534 { /* nForce3 Ethernet Controller */ 2628 { /* nForce3 Ethernet Controller */
2535 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6), 2629 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
2536 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC, 2630 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
2537 }, 2631 },
2538 { /* nForce3 Ethernet Controller */ 2632 { /* nForce3 Ethernet Controller */
2539 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7), 2633 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
2540 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC, 2634 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
2541 }, 2635 },
2542 { /* CK804 Ethernet Controller */ 2636 { /* CK804 Ethernet Controller */
2543 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8), 2637 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
2544 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, 2638 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2545 }, 2639 },
2546 { /* CK804 Ethernet Controller */ 2640 { /* CK804 Ethernet Controller */
2547 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9), 2641 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
2548 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, 2642 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2549 }, 2643 },
2550 { /* MCP04 Ethernet Controller */ 2644 { /* MCP04 Ethernet Controller */
2551 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10), 2645 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
2552 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, 2646 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2553 }, 2647 },
2554 { /* MCP04 Ethernet Controller */ 2648 { /* MCP04 Ethernet Controller */
2555 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11), 2649 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
2556 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, 2650 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2557 }, 2651 },
2558 { /* MCP51 Ethernet Controller */ 2652 { /* MCP51 Ethernet Controller */
2559 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12), 2653 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
@@ -2565,11 +2659,11 @@ static struct pci_device_id pci_tbl[] = {
2565 }, 2659 },
2566 { /* MCP55 Ethernet Controller */ 2660 { /* MCP55 Ethernet Controller */
2567 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 2661 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
2568 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, 2662 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2569 }, 2663 },
2570 { /* MCP55 Ethernet Controller */ 2664 { /* MCP55 Ethernet Controller */
2571 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 2665 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
2572 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA, 2666 .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA,
2573 }, 2667 },
2574 {0,}, 2668 {0,},
2575}; 2669};
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
new file mode 100644
index 000000000000..6aaee67dd4b7
--- /dev/null
+++ b/drivers/net/fs_enet/Kconfig
@@ -0,0 +1,20 @@
1config FS_ENET
2 tristate "Freescale Ethernet Driver"
3 depends on NET_ETHERNET && (CPM1 || CPM2)
4 select MII
5
6config FS_ENET_HAS_SCC
7 bool "Chip has an SCC usable for ethernet"
8 depends on FS_ENET && (CPM1 || CPM2)
9 default y
10
11config FS_ENET_HAS_FCC
12 bool "Chip has an FCC usable for ethernet"
13 depends on FS_ENET && CPM2
14 default y
15
16config FS_ENET_HAS_FEC
17 bool "Chip has an FEC usable for ethernet"
18 depends on FS_ENET && CPM1
19 default y
20
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
new file mode 100644
index 000000000000..d6dd3f2fb43e
--- /dev/null
+++ b/drivers/net/fs_enet/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the Freescale Ethernet controllers
3#
4
5obj-$(CONFIG_FS_ENET) += fs_enet.o
6
7obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o
8obj-$(CONFIG_8260) += mac-fcc.o
9
10fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
new file mode 100644
index 000000000000..44fac7373289
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -0,0 +1,1226 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18#include <linux/config.h>
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/ptrace.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/delay.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/spinlock.h>
36#include <linux/mii.h>
37#include <linux/ethtool.h>
38#include <linux/bitops.h>
39#include <linux/fs.h>
40
41#include <linux/vmalloc.h>
42#include <asm/pgtable.h>
43
44#include <asm/pgtable.h>
45#include <asm/irq.h>
46#include <asm/uaccess.h>
47
48#include "fs_enet.h"
49
50/*************************************************/
51
/* Banner printed once at module load. */
static char version[] __devinitdata =
    DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Bitmapped debug-message enable, settable as a module parameter. */
MODULE_PARM(fs_enet_debug, "i");
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

int fs_enet_debug = -1;		/* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
65
66static void fs_set_multicast_list(struct net_device *dev)
67{
68 struct fs_enet_private *fep = netdev_priv(dev);
69
70 (*fep->ops->set_multicast_list)(dev);
71}
72
/*
 * NAPI receive function.
 *
 * Drains the RX buffer-descriptor ring until it is empty or the NAPI
 * quota (min of dev->quota and *budget) is exhausted.  Returns 1 when
 * more work remains (poll again later) and 0 when the ring was fully
 * drained and RX interrupts have been re-enabled.
 */
static int fs_enet_rx_napi(struct net_device *dev, int *budget)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	int rx_work_limit = 0;	/* pacify gcc */

	rx_work_limit = min(dev->quota, *budget);

	if (!netif_running(dev))
		return 0;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi*/
	(*fep->ops->napi_clear_rx_event)(dev);

	/* walk descriptors until the controller still owns one (EMPTY set) */
	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			/* NOTE(review): overruns are folded into rx_crc_errors
			 * rather than rx_fifo_errors -- confirm intended */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			/* NOTE(review): passes the CPU address (skb->data)
			 * where dma_unmap_single() expects the dma_addr_t
			 * from the earlier dma_map_single() -- verify this is
			 * acceptable on the supported (1:1-mapped) platforms */
			dma_unmap_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/* errored frame: recycle the same skb into the ring */
			skbn = skb;

		} else {

			/* napi, got packet but no quota */
			if (--rx_work_limit < 0)
				break;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			/* small frames: copy into a fresh skb and keep the
			 * original mapped buffer in the ring */
			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				/* out of memory: drop and reuse old buffer */
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		/* hand the (new or recycled) buffer back to the controller */
		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	dev->quota -= received;
	*budget -= received;

	if (rx_work_limit < 0)
		return 1;	/* not done */

	/* done */
	netif_rx_complete(dev);

	(*fep->ops->napi_enable_rx)(dev);

	return 0;
}
220
/*
 * Non-NAPI receive function: same buffer-descriptor processing as
 * fs_enet_rx_napi() but with no work quota, delivering frames through
 * netif_rx() from interrupt context.  Always returns 0.
 */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* walk descriptors until the controller still owns one (EMPTY set) */
	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			/* NOTE(review): counted in rx_crc_errors, not
			 * rx_fifo_errors -- confirm intended */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/* errored frame: recycle the same skb into the ring */
			skbn = skb;

		} else {

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			/* small frames: copy into a fresh skb and keep the
			 * original mapped buffer in the ring */
			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					memcpy(skbn->data, skb->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb->dev = dev;
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				/* out of memory: drop and reuse old buffer */
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		/* hand the (new or recycled) buffer back to the controller */
		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
			     L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
			     DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}
343
/*
 * Transmit-completion handler, called from the MAC interrupt.
 *
 * Walks the TX ring from dirty_tx, reclaiming every descriptor the
 * controller has finished with (READY cleared): accounts errors,
 * unmaps and frees the skb, and wakes the queue if space was freed.
 * Restarts the transmitter after fatal TX errors.  Runs under
 * fep->lock (plain spin_lock: we are already in hard-irq context).
 */
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		/* ring fully reclaimed -- nothing pending */
		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			/* these errors stop the transmitter; restart it
			 * once the ring has been reclaimed */
			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		/* NOTE(review): passes skb->data (CPU address) where
		 * dma_unmap_single() expects the mapped dma_addr_t --
		 * verify this is acceptable on the supported platforms */
		dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->lock);

	if (do_wake)
		netif_wake_queue(dev);
}
436
437/*
438 * The interrupt handler.
439 * This is called from the MPC core interrupt.
440 */
/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 *
 * Loops while the MAC reports pending events: dispatches error events to
 * the backend, receives frames (directly, or by scheduling NAPI with RX
 * interrupts masked), and reclaims completed transmits.  Returns
 * IRQ_HANDLED if at least one event was processed (the line is shared).
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {

		nr++;

		int_clr_events = int_events;
		/* in NAPI mode, leave RX events latched -- they are
		 * acknowledged below, just before scheduling the poll */
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = netif_rx_schedule_prep(dev);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__netif_rx_schedule(dev);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}
492
/*
 * (Re)initialize both buffer-descriptor rings: attach a fresh receive
 * skb to every RX descriptor and mark it empty, and clear every TX
 * descriptor.  Previously attached buffers are released first via
 * fs_cleanup_bds().  Called with the controller stopped.
 */
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;	/* entire TX ring is free */
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			break;
		}
		fep->rx_skbuff[i] = skb;
		skb->dev = dev;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		/* last descriptor carries WRAP so the ring loops */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fillup remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}
545
546void fs_cleanup_bds(struct net_device *dev)
547{
548 struct fs_enet_private *fep = netdev_priv(dev);
549 struct sk_buff *skb;
550 int i;
551
552 /*
553 * Reset SKB transmit buffers.
554 */
555 for (i = 0; i < fep->tx_ring; i++) {
556 if ((skb = fep->tx_skbuff[i]) == NULL)
557 continue;
558
559 /* unmap */
560 dma_unmap_single(fep->dev, skb->data, skb->len, DMA_TO_DEVICE);
561
562 fep->tx_skbuff[i] = NULL;
563 dev_kfree_skb(skb);
564 }
565
566 /*
567 * Reset SKB receive buffers
568 */
569 for (i = 0; i < fep->rx_ring; i++) {
570 if ((skb = fep->rx_skbuff[i]) == NULL)
571 continue;
572
573 /* unmap */
574 dma_unmap_single(fep->dev, skb->data,
575 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
576 DMA_FROM_DEVICE);
577
578 fep->rx_skbuff[i] = NULL;
579
580 dev_kfree_skb(skb);
581 }
582}
583
584/**********************************************************************************/
585
/*
 * net_device->hard_start_xmit hook: queue one skb on the TX ring.
 *
 * Maps the packet for DMA, fills the current descriptor, hands it to
 * the controller (READY) and kicks transmission.  Stops the queue when
 * the ring fills.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring
 * is unexpectedly full.  Serialized by fep->tx_lock.
 */
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!.\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
			  skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	dev->trans_start = jiffies;

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	/* no free descriptors left: stall the queue until fs_enet_tx()
	 * reclaims one */
	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}
664
665static int fs_request_irq(struct net_device *dev, int irq, const char *name,
666 irqreturn_t (*irqf)(int irq, void *dev_id, struct pt_regs *regs))
667{
668 struct fs_enet_private *fep = netdev_priv(dev);
669
670 (*fep->ops->pre_request_irq)(dev, irq);
671 return request_irq(irq, irqf, SA_SHIRQ, name, dev);
672}
673
674static void fs_free_irq(struct net_device *dev, int irq)
675{
676 struct fs_enet_private *fep = netdev_priv(dev);
677
678 free_irq(irq, dev);
679 (*fep->ops->post_free_irq)(dev, irq);
680}
681
682/**********************************************************************************/
683
684/* This interrupt occurs when the PHY detects a link change. */
685static irqreturn_t
686fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
687{
688 struct net_device *dev = dev_id;
689 struct fs_enet_private *fep;
690 const struct fs_platform_info *fpi;
691
692 fep = netdev_priv(dev);
693 fpi = fep->fpi;
694
695 /*
696 * Acknowledge the interrupt if possible. If we have not
697 * found the PHY yet we can't process or acknowledge the
698 * interrupt now. Instead we ignore this interrupt for now,
699 * which we can do since it is edge triggered. It will be
700 * acknowledged later by fs_enet_open().
701 */
702 if (!fep->phy)
703 return IRQ_NONE;
704
705 fs_mii_ack_int(dev);
706 fs_mii_link_status_change_check(dev, 0);
707
708 return IRQ_HANDLED;
709}
710
711static void fs_timeout(struct net_device *dev)
712{
713 struct fs_enet_private *fep = netdev_priv(dev);
714 unsigned long flags;
715 int wake = 0;
716
717 fep->stats.tx_errors++;
718
719 spin_lock_irqsave(&fep->lock, flags);
720
721 if (dev->flags & IFF_UP) {
722 (*fep->ops->stop)(dev);
723 (*fep->ops->restart)(dev);
724 }
725
726 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
727 spin_unlock_irqrestore(&fep->lock, flags);
728
729 if (wake)
730 netif_wake_queue(dev);
731}
732
733static int fs_enet_open(struct net_device *dev)
734{
735 struct fs_enet_private *fep = netdev_priv(dev);
736 const struct fs_platform_info *fpi = fep->fpi;
737 int r;
738
739 /* Install our interrupt handler. */
740 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
741 if (r != 0) {
742 printk(KERN_ERR DRV_MODULE_NAME
743 ": %s Could not allocate FEC IRQ!", dev->name);
744 return -EINVAL;
745 }
746
747 /* Install our phy interrupt handler */
748 if (fpi->phy_irq != -1) {
749
750 r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
751 if (r != 0) {
752 printk(KERN_ERR DRV_MODULE_NAME
753 ": %s Could not allocate PHY IRQ!", dev->name);
754 fs_free_irq(dev, fep->interrupt);
755 return -EINVAL;
756 }
757 }
758
759 fs_mii_startup(dev);
760 netif_carrier_off(dev);
761 fs_mii_link_status_change_check(dev, 1);
762
763 return 0;
764}
765
766static int fs_enet_close(struct net_device *dev)
767{
768 struct fs_enet_private *fep = netdev_priv(dev);
769 const struct fs_platform_info *fpi = fep->fpi;
770 unsigned long flags;
771
772 netif_stop_queue(dev);
773 netif_carrier_off(dev);
774 fs_mii_shutdown(dev);
775
776 spin_lock_irqsave(&fep->lock, flags);
777 (*fep->ops->stop)(dev);
778 spin_unlock_irqrestore(&fep->lock, flags);
779
780 /* release any irqs */
781 if (fpi->phy_irq != -1)
782 fs_free_irq(dev, fpi->phy_irq);
783 fs_free_irq(dev, fep->interrupt);
784
785 return 0;
786}
787
788static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
789{
790 struct fs_enet_private *fep = netdev_priv(dev);
791 return &fep->stats;
792}
793
794/*************************************************************************/
795
796static void fs_get_drvinfo(struct net_device *dev,
797 struct ethtool_drvinfo *info)
798{
799 strcpy(info->driver, DRV_MODULE_NAME);
800 strcpy(info->version, DRV_MODULE_VERSION);
801}
802
803static int fs_get_regs_len(struct net_device *dev)
804{
805 struct fs_enet_private *fep = netdev_priv(dev);
806
807 return (*fep->ops->get_regs_len)(dev);
808}
809
810static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
811 void *p)
812{
813 struct fs_enet_private *fep = netdev_priv(dev);
814 unsigned long flags;
815 int r, len;
816
817 len = regs->len;
818
819 spin_lock_irqsave(&fep->lock, flags);
820 r = (*fep->ops->get_regs)(dev, p, &len);
821 spin_unlock_irqrestore(&fep->lock, flags);
822
823 if (r == 0)
824 regs->version = 0;
825}
826
827static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
828{
829 struct fs_enet_private *fep = netdev_priv(dev);
830 unsigned long flags;
831 int rc;
832
833 spin_lock_irqsave(&fep->lock, flags);
834 rc = mii_ethtool_gset(&fep->mii_if, cmd);
835 spin_unlock_irqrestore(&fep->lock, flags);
836
837 return rc;
838}
839
840static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
841{
842 struct fs_enet_private *fep = netdev_priv(dev);
843 unsigned long flags;
844 int rc;
845
846 spin_lock_irqsave(&fep->lock, flags);
847 rc = mii_ethtool_sset(&fep->mii_if, cmd);
848 spin_unlock_irqrestore(&fep->lock, flags);
849
850 return rc;
851}
852
853static int fs_nway_reset(struct net_device *dev)
854{
855 struct fs_enet_private *fep = netdev_priv(dev);
856 return mii_nway_restart(&fep->mii_if);
857}
858
859static u32 fs_get_msglevel(struct net_device *dev)
860{
861 struct fs_enet_private *fep = netdev_priv(dev);
862 return fep->msg_enable;
863}
864
865static void fs_set_msglevel(struct net_device *dev, u32 value)
866{
867 struct fs_enet_private *fep = netdev_priv(dev);
868 fep->msg_enable = value;
869}
870
/* ethtool operations: mostly thin wrappers over the generic MII helpers
 * and the per-MAC backend register dump. */
static struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,	/* local! */
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_regs = fs_get_regs,
};
886
887static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
888{
889 struct fs_enet_private *fep = netdev_priv(dev);
890 struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
891 unsigned long flags;
892 int rc;
893
894 if (!netif_running(dev))
895 return -EINVAL;
896
897 spin_lock_irqsave(&fep->lock, flags);
898 rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL);
899 spin_unlock_irqrestore(&fep->lock, flags);
900 return rc;
901}
902
903extern int fs_mii_connect(struct net_device *dev);
904extern void fs_mii_disconnect(struct net_device *dev);
905
906static struct net_device *fs_init_instance(struct device *dev,
907 const struct fs_platform_info *fpi)
908{
909 struct net_device *ndev = NULL;
910 struct fs_enet_private *fep = NULL;
911 int privsize, i, r, err = 0, registered = 0;
912
913 /* guard */
914 if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
915 return ERR_PTR(-EINVAL);
916
917 privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
918 (fpi->rx_ring + fpi->tx_ring));
919
920 ndev = alloc_etherdev(privsize);
921 if (!ndev) {
922 err = -ENOMEM;
923 goto err;
924 }
925 SET_MODULE_OWNER(ndev);
926
927 fep = netdev_priv(ndev);
928 memset(fep, 0, privsize); /* clear everything */
929
930 fep->dev = dev;
931 dev_set_drvdata(dev, ndev);
932 fep->fpi = fpi;
933 if (fpi->init_ioports)
934 fpi->init_ioports();
935
936#ifdef CONFIG_FS_ENET_HAS_FEC
937 if (fs_get_fec_index(fpi->fs_no) >= 0)
938 fep->ops = &fs_fec_ops;
939#endif
940
941#ifdef CONFIG_FS_ENET_HAS_SCC
942 if (fs_get_scc_index(fpi->fs_no) >=0 )
943 fep->ops = &fs_scc_ops;
944#endif
945
946#ifdef CONFIG_FS_ENET_HAS_FCC
947 if (fs_get_fcc_index(fpi->fs_no) >= 0)
948 fep->ops = &fs_fcc_ops;
949#endif
950
951 if (fep->ops == NULL) {
952 printk(KERN_ERR DRV_MODULE_NAME
953 ": %s No matching ops found (%d).\n",
954 ndev->name, fpi->fs_no);
955 err = -EINVAL;
956 goto err;
957 }
958
959 r = (*fep->ops->setup_data)(ndev);
960 if (r != 0) {
961 printk(KERN_ERR DRV_MODULE_NAME
962 ": %s setup_data failed\n",
963 ndev->name);
964 err = r;
965 goto err;
966 }
967
968 /* point rx_skbuff, tx_skbuff */
969 fep->rx_skbuff = (struct sk_buff **)&fep[1];
970 fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
971
972 /* init locks */
973 spin_lock_init(&fep->lock);
974 spin_lock_init(&fep->tx_lock);
975
976 /*
977 * Set the Ethernet address.
978 */
979 for (i = 0; i < 6; i++)
980 ndev->dev_addr[i] = fpi->macaddr[i];
981
982 r = (*fep->ops->allocate_bd)(ndev);
983
984 if (fep->ring_base == NULL) {
985 printk(KERN_ERR DRV_MODULE_NAME
986 ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
987 err = r;
988 goto err;
989 }
990
991 /*
992 * Set receive and transmit descriptor base.
993 */
994 fep->rx_bd_base = fep->ring_base;
995 fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;
996
997 /* initialize ring size variables */
998 fep->tx_ring = fpi->tx_ring;
999 fep->rx_ring = fpi->rx_ring;
1000
1001 /*
1002 * The FEC Ethernet specific entries in the device structure.
1003 */
1004 ndev->open = fs_enet_open;
1005 ndev->hard_start_xmit = fs_enet_start_xmit;
1006 ndev->tx_timeout = fs_timeout;
1007 ndev->watchdog_timeo = 2 * HZ;
1008 ndev->stop = fs_enet_close;
1009 ndev->get_stats = fs_enet_get_stats;
1010 ndev->set_multicast_list = fs_set_multicast_list;
1011 if (fpi->use_napi) {
1012 ndev->poll = fs_enet_rx_napi;
1013 ndev->weight = fpi->napi_weight;
1014 }
1015 ndev->ethtool_ops = &fs_ethtool_ops;
1016 ndev->do_ioctl = fs_ioctl;
1017
1018 init_timer(&fep->phy_timer_list);
1019
1020 netif_carrier_off(ndev);
1021
1022 err = register_netdev(ndev);
1023 if (err != 0) {
1024 printk(KERN_ERR DRV_MODULE_NAME
1025 ": %s register_netdev failed.\n", ndev->name);
1026 goto err;
1027 }
1028 registered = 1;
1029
1030 err = fs_mii_connect(ndev);
1031 if (err != 0) {
1032 printk(KERN_ERR DRV_MODULE_NAME
1033 ": %s fs_mii_connect failed.\n", ndev->name);
1034 goto err;
1035 }
1036
1037 return ndev;
1038
1039 err:
1040 if (ndev != NULL) {
1041
1042 if (registered)
1043 unregister_netdev(ndev);
1044
1045 if (fep != NULL) {
1046 (*fep->ops->free_bd)(ndev);
1047 (*fep->ops->cleanup_data)(ndev);
1048 }
1049
1050 free_netdev(ndev);
1051 }
1052
1053 dev_set_drvdata(dev, NULL);
1054
1055 return ERR_PTR(err);
1056}
1057
/*
 * Tear down one fs_enet instance: disconnect the PHY, unregister the
 * net_device, free the buffer-descriptor ring and the per-MAC data,
 * then drop the drvdata link and free the net_device.  Inverse of
 * fs_init_instance().  Returns 0, or -EINVAL on a NULL argument.
 */
static int fs_cleanup_instance(struct net_device *ndev)
{
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	struct device *dev;

	if (ndev == NULL)
		return -EINVAL;

	fep = netdev_priv(ndev);
	if (fep == NULL)
		return -EINVAL;

	fpi = fep->fpi;

	fs_mii_disconnect(ndev);

	unregister_netdev(ndev);

	/* one coherent block holds the rx ring followed by the tx ring */
	dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
			  fep->ring_base, fep->ring_mem_addr);

	/* reset it */
	(*fep->ops->cleanup_data)(ndev);

	dev = fep->dev;
	if (dev != NULL) {
		dev_set_drvdata(dev, NULL);
		fep->dev = NULL;
	}

	free_netdev(ndev);

	return 0;
}
1093
1094/**************************************************************************************/
1095
/* handy pointer to the immap */
void *fs_enet_immap = NULL;

/*
 * Map the on-chip register block so the MAC backends can reach their
 * registers.  Base address and window size depend on whether this is a
 * CPM1 (MPC8xx, 64K) or CPM2 (MPC82xx, 256K) part.
 * Returns 0 on success, -EBADF if the ioremap fails.
 */
static int setup_immap(void)
{
	phys_addr_t paddr = 0;
	unsigned long size = 0;

#ifdef CONFIG_CPM1
	paddr = IMAP_ADDR;
	size = 0x10000;	/* map 64K */
#endif

#ifdef CONFIG_CPM2
	paddr = CPM_MAP_ADDR;
	size = 0x40000;	/* map 256 K */
#endif
	fs_enet_immap = ioremap(paddr, size);
	if (fs_enet_immap == NULL)
		return -EBADF;	/* XXX ahem; maybe just BUG_ON? */

	return 0;
}
1119
1120static void cleanup_immap(void)
1121{
1122 if (fs_enet_immap != NULL) {
1123 iounmap(fs_enet_immap);
1124 fs_enet_immap = NULL;
1125 }
1126}
1127
1128/**************************************************************************************/
1129
1130static int __devinit fs_enet_probe(struct device *dev)
1131{
1132 struct net_device *ndev;
1133
1134 /* no fixup - no device */
1135 if (dev->platform_data == NULL) {
1136 printk(KERN_INFO "fs_enet: "
1137 "probe called with no platform data; "
1138 "remove unused devices\n");
1139 return -ENODEV;
1140 }
1141
1142 ndev = fs_init_instance(dev, dev->platform_data);
1143 if (IS_ERR(ndev))
1144 return PTR_ERR(ndev);
1145 return 0;
1146}
1147
/* Platform-bus remove: tear down the interface attached to @dev. */
static int fs_enet_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	return fs_cleanup_instance(ndev);
}
1152
/* One platform-bus driver per MAC type; all share the same probe/remove,
 * which selects the right backend from the platform data. */
static struct device_driver fs_enet_fec_driver = {
	.name		= "fsl-cpm-fec",
	.bus		= &platform_bus_type,
	.probe		= fs_enet_probe,
	.remove		= fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend	= fs_enet_suspend,	TODO */
/*	.resume		= fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_scc_driver = {
	.name		= "fsl-cpm-scc",
	.bus		= &platform_bus_type,
	.probe		= fs_enet_probe,
	.remove		= fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend	= fs_enet_suspend,	TODO */
/*	.resume		= fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_fcc_driver = {
	.name		= "fsl-cpm-fcc",
	.bus		= &platform_bus_type,
	.probe		= fs_enet_probe,
	.remove		= fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend	= fs_enet_suspend,	TODO */
/*	.resume		= fs_enet_resume,	TODO */
#endif
};
1185
1186static int __init fs_init(void)
1187{
1188 int r;
1189
1190 printk(KERN_INFO
1191 "%s", version);
1192
1193 r = setup_immap();
1194 if (r != 0)
1195 return r;
1196 r = driver_register(&fs_enet_fec_driver);
1197 if (r != 0)
1198 goto err;
1199
1200 r = driver_register(&fs_enet_fcc_driver);
1201 if (r != 0)
1202 goto err;
1203
1204 r = driver_register(&fs_enet_scc_driver);
1205 if (r != 0)
1206 goto err;
1207
1208 return 0;
1209err:
1210 cleanup_immap();
1211 return r;
1212
1213}
1214
/* Module exit: unregister all three drivers and unmap the registers. */
static void __exit fs_cleanup(void)
{
	driver_unregister(&fs_enet_fec_driver);
	driver_unregister(&fs_enet_fcc_driver);
	driver_unregister(&fs_enet_scc_driver);
	cleanup_immap();
}
1222
1223/**************************************************************************************/
1224
1225module_init(fs_init);
1226module_exit(fs_cleanup);
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
new file mode 100644
index 000000000000..c6770377ef87
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet-mii.c
@@ -0,0 +1,507 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18
19#include <linux/config.h>
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/string.h>
25#include <linux/ptrace.h>
26#include <linux/errno.h>
27#include <linux/ioport.h>
28#include <linux/slab.h>
29#include <linux/interrupt.h>
30#include <linux/pci.h>
31#include <linux/init.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/spinlock.h>
37#include <linux/mii.h>
38#include <linux/ethtool.h>
39#include <linux/bitops.h>
40
41#include <asm/pgtable.h>
42#include <asm/irq.h>
43#include <asm/uaccess.h>
44
45#include "fs_enet.h"
46
47/*************************************************/
48
49/*
50 * Generic PHY support.
51 * Should work for all PHYs, but link change is detected by polling
52 */
53
54static void generic_timer_callback(unsigned long data)
55{
56 struct net_device *dev = (struct net_device *)data;
57 struct fs_enet_private *fep = netdev_priv(dev);
58
59 fep->phy_timer_list.expires = jiffies + HZ / 2;
60
61 add_timer(&fep->phy_timer_list);
62
63 fs_mii_link_status_change_check(dev, 0);
64}
65
/*
 * Generic PHY startup: arm the polling timer.  Link state will be
 * sampled by generic_timer_callback() every 500ms from now on.
 */
static void generic_startup(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fep->phy_timer_list.expires = jiffies + HZ / 2;	/* every 500ms */
	fep->phy_timer_list.data = (unsigned long)dev;
	fep->phy_timer_list.function = generic_timer_callback;
	add_timer(&fep->phy_timer_list);
}
75
/*
 * Generic PHY shutdown: stop the poll timer, waiting for a running
 * callback to finish (del_timer_sync), so no check races the close.
 */
static void generic_shutdown(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	del_timer_sync(&fep->phy_timer_list);
}
82
83/* ------------------------------------------------------------------------- */
84/* The Davicom DM9161 is used on the NETTA board */
85
86/* register definitions */
87
88#define MII_DM9161_ANAR 4 /* Aux. Config Register */
89#define MII_DM9161_ACR 16 /* Aux. Config Register */
90#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
91#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
92#define MII_DM9161_INTR 21 /* Interrupt Register */
93#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
94#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
95
96static void dm9161_startup(struct net_device *dev)
97{
98 struct fs_enet_private *fep = netdev_priv(dev);
99
100 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
101 /* Start autonegotiation */
102 fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
103
104 set_current_state(TASK_UNINTERRUPTIBLE);
105 schedule_timeout(HZ*8);
106}
107
/*
 * Acknowledge a DM9161 interrupt.  Reading the interrupt register is
 * what clears the pending condition; the value itself is discarded.
 */
static void dm9161_ack_int(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
}
114
/*
 * DM9161 shutdown.  Writing 0x0f00 to the interrupt register
 * presumably masks the interrupt sources -- TODO confirm against the
 * DM9161 datasheet.
 */
static void dm9161_shutdown(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
}
121
122/**********************************************************************************/
123
/*
 * Supported PHYs.  phy_id_detect() walks this table in order; the
 * id == 0 "GENERIC" entry matches anything, so it must stay last.
 */
static const struct phy_info phy_info[] = {
	{
		.id = 0x00181b88,
		.name = "DM9161",
		.startup = dm9161_startup,
		.ack_int = dm9161_ack_int,
		.shutdown = dm9161_shutdown,
	}, {
		/* catch-all: polled link detection, no ack_int */
		.id = 0,
		.name = "GENERIC",
		.startup = generic_startup,
		.shutdown = generic_shutdown,
	},
};
138
139/**********************************************************************************/
140
/*
 * Find a PHY on this MII bus and match it against phy_info[].
 *
 * If the platform gave a fixed address (fpi->phy_addr != -1) only that
 * address is probed; otherwise addresses 1..31 are scanned (address 0
 * is skipped).  On success: sets fep->phy, marks the address in the
 * bus usage map, and returns the PHY address.  Returns -1 on failure.
 */
static int phy_id_detect(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	struct fs_enet_mii_bus *bus = fep->mii_bus;
	int i, r, start, end, phytype, physubtype;
	const struct phy_info *phy;
	int phy_hwid, phy_id;

	phy_hwid = -1;
	fep->phy = NULL;

	/* auto-detect? */
	if (fpi->phy_addr == -1) {
		start = 1;
		end = 32;
	} else {		/* direct */
		start = fpi->phy_addr;
		end = start + 1;
	}

	for (phy_id = start; phy_id < end; phy_id++) {
		/* skip already used phy addresses on this bus */
		if (bus->usage_map & (1 << phy_id))
			continue;
		/* 0xffff in an ID register means nothing answered */
		r = fs_mii_read(dev, phy_id, MII_PHYSID1);
		if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
			continue;
		r = fs_mii_read(dev, phy_id, MII_PHYSID2);
		if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
			continue;
		phy_hwid = (phytype << 16) | physubtype;
		if (phy_hwid != -1)
			break;
	}

	if (phy_hwid == -1) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s No PHY detected! range=0x%02x-0x%02x\n",
			dev->name, start, end);
		return -1;
	}

	/* match table entry; the low 4 bits (silicon revision nibble)
	   are dropped, and id == 0 is the generic catch-all */
	for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
		if (phy->id == (phy_hwid >> 4) || phy->id == 0)
			break;

	if (i >= ARRAY_SIZE(phy_info)) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s PHY id 0x%08x is not supported!\n",
		       dev->name, phy_hwid);
		return -1;
	}

	fep->phy = phy;

	/* mark this address as used */
	bus->usage_map |= (1 << phy_id);

	printk(KERN_INFO DRV_MODULE_NAME
	       ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
	       dev->name, phy_id, fep->phy->name, phy_hwid,
	       fpi->phy_addr == -1 ? " (auto-detected)" : "");

	return phy_id;
}
207
208void fs_mii_startup(struct net_device *dev)
209{
210 struct fs_enet_private *fep = netdev_priv(dev);
211
212 if (fep->phy->startup)
213 (*fep->phy->startup) (dev);
214}
215
216void fs_mii_shutdown(struct net_device *dev)
217{
218 struct fs_enet_private *fep = netdev_priv(dev);
219
220 if (fep->phy->shutdown)
221 (*fep->phy->shutdown) (dev);
222}
223
224void fs_mii_ack_int(struct net_device *dev)
225{
226 struct fs_enet_private *fep = netdev_priv(dev);
227
228 if (fep->phy->ack_int)
229 (*fep->phy->ack_int) (dev);
230}
231
232#define MII_LINK 0x0001
233#define MII_HALF 0x0002
234#define MII_FULL 0x0004
235#define MII_BASE4 0x0008
236#define MII_10M 0x0010
237#define MII_100M 0x0020
238#define MII_1G 0x0040
239#define MII_10G 0x0080
240
241/* return full mii info at one gulp, with a usable form */
/* return full mii info at one gulp, with a usable form (MII_* flags).
 * Returns 0 for "no usable link" (link down, or autoneg still running).
 */
static unsigned int mii_full_status(struct mii_if_info *mii)
{
	unsigned int status;
	int bmsr, adv, lpa, neg;
	struct fs_enet_private* fep = netdev_priv(mii->dev);

	/* first, a dummy read, needed to latch some MII phys */
	(void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
	bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);

	/* no link */
	if ((bmsr & BMSR_LSTATUS) == 0)
		return 0;

	status = MII_LINK;

	/* Let's look at what ANEG says if it's supported - otherwise we
	   shall take the right values from the platform info */
	if(!mii->force_media) {
		/* autoneg not completed; don't bother */
		if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
			return 0;

		adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
		lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);

		/* negotiated = advertised AND link-partner abilities */
		neg = lpa & adv;
	} else {
		/* forced media: fixed lpa comes from platform data */
		neg = fep->fpi->bus_info->lpa;
	}

	/* decode the best common mode, fastest/fullest first */
	if (neg & LPA_100FULL)
		status |= MII_FULL | MII_100M;
	else if (neg & LPA_100BASE4)
		status |= MII_FULL | MII_BASE4 | MII_100M;
	else if (neg & LPA_100HALF)
		status |= MII_HALF | MII_100M;
	else if (neg & LPA_10FULL)
		status |= MII_FULL | MII_10M;
	else
		status |= MII_HALF | MII_10M;

	return status;
}
286
/*
 * Compare the current MII status against the last one seen and
 * stop/restart the MAC on a change.  @init_media forces the update
 * even when nothing changed (used when the interface is opened).
 * Called from the poll timer and from the PHY interrupt path.
 */
void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct mii_if_info *mii = &fep->mii_if;
	unsigned int mii_status;
	int ok_to_print, link, duplex, speed;
	unsigned long flags;

	ok_to_print = netif_msg_link(fep);

	mii_status = mii_full_status(mii);

	/* nothing changed -> nothing to do */
	if (!init_media && mii_status == fep->last_mii_status)
		return;

	fep->last_mii_status = mii_status;

	link = !!(mii_status & MII_LINK);
	duplex = !!(mii_status & MII_FULL);
	speed = (mii_status & MII_100M) ? 100 : 10;

	if (link == 0) {
		netif_carrier_off(mii->dev);
		netif_stop_queue(dev);
		/* don't stop the MAC at open time -- it isn't running yet */
		if (!init_media) {
			spin_lock_irqsave(&fep->lock, flags);
			(*fep->ops->stop)(dev);
			spin_unlock_irqrestore(&fep->lock, flags);
		}

		if (ok_to_print)
			printk(KERN_INFO "%s: link down\n", mii->dev->name);

	} else {

		mii->full_duplex = duplex;

		netif_carrier_on(mii->dev);

		/* reprogram the MAC for the new speed/duplex under the lock */
		spin_lock_irqsave(&fep->lock, flags);
		fep->duplex = duplex;
		fep->speed = speed;
		(*fep->ops->restart)(dev);
		spin_unlock_irqrestore(&fep->lock, flags);

		netif_start_queue(dev);

		if (ok_to_print)
			printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
			       dev->name, speed, duplex ? "full" : "half");
	}
}
339
340/**********************************************************************************/
341
342int fs_mii_read(struct net_device *dev, int phy_id, int location)
343{
344 struct fs_enet_private *fep = netdev_priv(dev);
345 struct fs_enet_mii_bus *bus = fep->mii_bus;
346
347 unsigned long flags;
348 int ret;
349
350 spin_lock_irqsave(&bus->mii_lock, flags);
351 ret = (*bus->mii_read)(bus, phy_id, location);
352 spin_unlock_irqrestore(&bus->mii_lock, flags);
353
354 return ret;
355}
356
357void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
358{
359 struct fs_enet_private *fep = netdev_priv(dev);
360 struct fs_enet_mii_bus *bus = fep->mii_bus;
361 unsigned long flags;
362
363 spin_lock_irqsave(&bus->mii_lock, flags);
364 (*bus->mii_write)(bus, phy_id, location, value);
365 spin_unlock_irqrestore(&bus->mii_lock, flags);
366}
367
368/*****************************************************************************/
369
370/* list of all registered mii buses */
371static LIST_HEAD(fs_mii_bus_list);
372
373static struct fs_enet_mii_bus *lookup_bus(int method, int id)
374{
375 struct list_head *ptr;
376 struct fs_enet_mii_bus *bus;
377
378 list_for_each(ptr, &fs_mii_bus_list) {
379 bus = list_entry(ptr, struct fs_enet_mii_bus, list);
380 if (bus->bus_info->method == method &&
381 bus->bus_info->id == id)
382 return bus;
383 }
384 return NULL;
385}
386
387static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
388{
389 struct fs_enet_mii_bus *bus;
390 int ret = 0;
391
392 bus = kmalloc(sizeof(*bus), GFP_KERNEL);
393 if (bus == NULL) {
394 ret = -ENOMEM;
395 goto err;
396 }
397 memset(bus, 0, sizeof(*bus));
398 spin_lock_init(&bus->mii_lock);
399 bus->bus_info = bi;
400 bus->refs = 0;
401 bus->usage_map = 0;
402
403 /* perform initialization */
404 switch (bi->method) {
405
406 case fsmii_fixed:
407 ret = fs_mii_fixed_init(bus);
408 if (ret != 0)
409 goto err;
410 break;
411
412 case fsmii_bitbang:
413 ret = fs_mii_bitbang_init(bus);
414 if (ret != 0)
415 goto err;
416 break;
417#ifdef CONFIG_FS_ENET_HAS_FEC
418 case fsmii_fec:
419 ret = fs_mii_fec_init(bus);
420 if (ret != 0)
421 goto err;
422 break;
423#endif
424 default:
425 ret = -EINVAL;
426 goto err;
427 }
428
429 list_add(&bus->list, &fs_mii_bus_list);
430
431 return bus;
432
433err:
434 if (bus)
435 kfree(bus);
436 return ERR_PTR(ret);
437}
438
/*
 * Tear down a bus once its last user is gone (refs dropped to 0).
 * NOTE(review): no method-specific cleanup hook is called here --
 * assumes the fs_mii_*_init() paths allocate nothing beyond the bus
 * struct itself; verify against the mii-* implementations.
 */
static void destroy_bus(struct fs_enet_mii_bus *bus)
{
	/* remove from bus list */
	list_del(&bus->list);

	/* nothing more needed */
	kfree(bus);
}
447
448int fs_mii_connect(struct net_device *dev)
449{
450 struct fs_enet_private *fep = netdev_priv(dev);
451 const struct fs_platform_info *fpi = fep->fpi;
452 struct fs_enet_mii_bus *bus = NULL;
453
454 /* check method validity */
455 switch (fpi->bus_info->method) {
456 case fsmii_fixed:
457 case fsmii_bitbang:
458 break;
459#ifdef CONFIG_FS_ENET_HAS_FEC
460 case fsmii_fec:
461 break;
462#endif
463 default:
464 printk(KERN_ERR DRV_MODULE_NAME
465 ": %s Unknown MII bus method (%d)!\n",
466 dev->name, fpi->bus_info->method);
467 return -EINVAL;
468 }
469
470 bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
471
472 /* if not found create new bus */
473 if (bus == NULL) {
474 bus = create_bus(fpi->bus_info);
475 if (IS_ERR(bus)) {
476 printk(KERN_ERR DRV_MODULE_NAME
477 ": %s MII bus creation failure!\n", dev->name);
478 return PTR_ERR(bus);
479 }
480 }
481
482 bus->refs++;
483
484 fep->mii_bus = bus;
485
486 fep->mii_if.dev = dev;
487 fep->mii_if.phy_id_mask = 0x1f;
488 fep->mii_if.reg_num_mask = 0x1f;
489 fep->mii_if.mdio_read = fs_mii_read;
490 fep->mii_if.mdio_write = fs_mii_write;
491 fep->mii_if.force_media = fpi->bus_info->disable_aneg;
492 fep->mii_if.phy_id = phy_id_detect(dev);
493
494 return 0;
495}
496
/*
 * Detach this device from its MII bus; the bus itself is destroyed
 * once the last user disconnects.
 */
void fs_mii_disconnect(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct fs_enet_mii_bus *bus = NULL;

	bus = fep->mii_bus;
	fep->mii_bus = NULL;

	if (--bus->refs <= 0)
		destroy_bus(bus);
}
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
new file mode 100644
index 000000000000..1105543b9d88
--- /dev/null
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -0,0 +1,245 @@
1#ifndef FS_ENET_H
2#define FS_ENET_H
3
4#include <linux/mii.h>
5#include <linux/netdevice.h>
6#include <linux/types.h>
7#include <linux/version.h>
8#include <linux/list.h>
9
10#include <linux/fs_enet_pd.h>
11
12#include <asm/dma-mapping.h>
13
14#ifdef CONFIG_CPM1
15#include <asm/commproc.h>
16#endif
17
18#ifdef CONFIG_CPM2
19#include <asm/cpm2.h>
20#endif
21
22/* hw driver ops */
23struct fs_ops {
24 int (*setup_data)(struct net_device *dev);
25 int (*allocate_bd)(struct net_device *dev);
26 void (*free_bd)(struct net_device *dev);
27 void (*cleanup_data)(struct net_device *dev);
28 void (*set_multicast_list)(struct net_device *dev);
29 void (*restart)(struct net_device *dev);
30 void (*stop)(struct net_device *dev);
31 void (*pre_request_irq)(struct net_device *dev, int irq);
32 void (*post_free_irq)(struct net_device *dev, int irq);
33 void (*napi_clear_rx_event)(struct net_device *dev);
34 void (*napi_enable_rx)(struct net_device *dev);
35 void (*napi_disable_rx)(struct net_device *dev);
36 void (*rx_bd_done)(struct net_device *dev);
37 void (*tx_kickstart)(struct net_device *dev);
38 u32 (*get_int_events)(struct net_device *dev);
39 void (*clear_int_events)(struct net_device *dev, u32 int_events);
40 void (*ev_error)(struct net_device *dev, u32 int_events);
41 int (*get_regs)(struct net_device *dev, void *p, int *sizep);
42 int (*get_regs_len)(struct net_device *dev);
43 void (*tx_restart)(struct net_device *dev);
44};
45
46struct phy_info {
47 unsigned int id;
48 const char *name;
49 void (*startup) (struct net_device * dev);
50 void (*shutdown) (struct net_device * dev);
51 void (*ack_int) (struct net_device * dev);
52};
53
54/* The FEC stores dest/src/type, data, and checksum for receive packets.
55 */
56#define MAX_MTU 1508 /* Allow fullsized pppoe packets over VLAN */
57#define MIN_MTU 46 /* this is data size */
58#define CRC_LEN 4
59
60#define PKT_MAXBUF_SIZE (MAX_MTU+ETH_HLEN+CRC_LEN)
61#define PKT_MINBUF_SIZE (MIN_MTU+ETH_HLEN+CRC_LEN)
62
63/* Must be a multiple of 32 (to cover both FEC & FCC) */
64#define PKT_MAXBLR_SIZE ((PKT_MAXBUF_SIZE + 31) & ~31)
65/* This is needed so that invalidate_xxx wont invalidate too much */
66#define ENET_RX_FRSIZE L1_CACHE_ALIGN(PKT_MAXBUF_SIZE)
67
68struct fs_enet_mii_bus {
69 struct list_head list;
70 spinlock_t mii_lock;
71 const struct fs_mii_bus_info *bus_info;
72 int refs;
73 u32 usage_map;
74
75 int (*mii_read)(struct fs_enet_mii_bus *bus,
76 int phy_id, int location);
77
78 void (*mii_write)(struct fs_enet_mii_bus *bus,
79 int phy_id, int location, int value);
80
81 union {
82 struct {
83 unsigned int mii_speed;
84 void *fecp;
85 } fec;
86
87 struct {
88 /* note that the actual port size may */
89 /* be different; cpm(s) handle it OK */
90 u8 mdio_msk;
91 u8 *mdio_dir;
92 u8 *mdio_dat;
93 u8 mdc_msk;
94 u8 *mdc_dir;
95 u8 *mdc_dat;
96 } bitbang;
97
98 struct {
99 u16 lpa;
100 } fixed;
101 };
102};
103
104int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
105int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
106int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
107
108struct fs_enet_private {
109 struct device *dev; /* pointer back to the device (must be initialized first) */
110 spinlock_t lock; /* during all ops except TX pckt processing */
111 spinlock_t tx_lock; /* during fs_start_xmit and fs_tx */
112 const struct fs_platform_info *fpi;
113 const struct fs_ops *ops;
114 int rx_ring, tx_ring;
115 dma_addr_t ring_mem_addr;
116 void *ring_base;
117 struct sk_buff **rx_skbuff;
118 struct sk_buff **tx_skbuff;
119 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
120 cbd_t *tx_bd_base;
121 cbd_t *dirty_tx; /* ring entries to be free()ed. */
122 cbd_t *cur_rx;
123 cbd_t *cur_tx;
124 int tx_free;
125 struct net_device_stats stats;
126 struct timer_list phy_timer_list;
127 const struct phy_info *phy;
128 u32 msg_enable;
129 struct mii_if_info mii_if;
130 unsigned int last_mii_status;
131 struct fs_enet_mii_bus *mii_bus;
132 int interrupt;
133
134 int duplex, speed; /* current settings */
135
136 /* event masks */
137 u32 ev_napi_rx; /* mask of NAPI rx events */
138 u32 ev_rx; /* rx event mask */
139 u32 ev_tx; /* tx event mask */
140 u32 ev_err; /* error event mask */
141
142 u16 bd_rx_empty; /* mask of BD rx empty */
143 u16 bd_rx_err; /* mask of BD rx errors */
144
145 union {
146 struct {
147 int idx; /* FEC1 = 0, FEC2 = 1 */
148 void *fecp; /* hw registers */
149 u32 hthi, htlo; /* state for multicast */
150 } fec;
151
152 struct {
153 int idx; /* FCC1-3 = 0-2 */
154 void *fccp; /* hw registers */
155 void *ep; /* parameter ram */
156 void *fcccp; /* hw registers cont. */
157 void *mem; /* FCC DPRAM */
158 u32 gaddrh, gaddrl; /* group address */
159 } fcc;
160
161 struct {
162 int idx; /* FEC1 = 0, FEC2 = 1 */
163 void *sccp; /* hw registers */
164 void *ep; /* parameter ram */
165 u32 hthi, htlo; /* state for multicast */
166 } scc;
167
168 };
169};
170
171/***************************************************************************/
172
173int fs_mii_read(struct net_device *dev, int phy_id, int location);
174void fs_mii_write(struct net_device *dev, int phy_id, int location, int value);
175
176void fs_mii_startup(struct net_device *dev);
177void fs_mii_shutdown(struct net_device *dev);
178void fs_mii_ack_int(struct net_device *dev);
179
180void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
181
182void fs_init_bds(struct net_device *dev);
183void fs_cleanup_bds(struct net_device *dev);
184
185/***************************************************************************/
186
187#define DRV_MODULE_NAME "fs_enet"
188#define PFX DRV_MODULE_NAME ": "
189#define DRV_MODULE_VERSION "1.0"
190#define DRV_MODULE_RELDATE "Aug 8, 2005"
191
192/***************************************************************************/
193
194int fs_enet_platform_init(void);
195void fs_enet_platform_cleanup(void);
196
197/***************************************************************************/
198
199/* buffer descriptor access macros */
200
201/* access macros */
202#if defined(CONFIG_CPM1)
203/* for a a CPM1 __raw_xxx's are sufficient */
204#define __cbd_out32(addr, x) __raw_writel(x, addr)
205#define __cbd_out16(addr, x) __raw_writew(x, addr)
206#define __cbd_in32(addr) __raw_readl(addr)
207#define __cbd_in16(addr) __raw_readw(addr)
208#else
209/* for others play it safe */
210#define __cbd_out32(addr, x) out_be32(addr, x)
211#define __cbd_out16(addr, x) out_be16(addr, x)
212#define __cbd_in32(addr) in_be32(addr)
213#define __cbd_in16(addr) in_be16(addr)
214#endif
215
216/* write */
217#define CBDW_SC(_cbd, _sc) __cbd_out16(&(_cbd)->cbd_sc, (_sc))
218#define CBDW_DATLEN(_cbd, _datlen) __cbd_out16(&(_cbd)->cbd_datlen, (_datlen))
219#define CBDW_BUFADDR(_cbd, _bufaddr) __cbd_out32(&(_cbd)->cbd_bufaddr, (_bufaddr))
220
221/* read */
222#define CBDR_SC(_cbd) __cbd_in16(&(_cbd)->cbd_sc)
223#define CBDR_DATLEN(_cbd) __cbd_in16(&(_cbd)->cbd_datlen)
224#define CBDR_BUFADDR(_cbd) __cbd_in32(&(_cbd)->cbd_bufaddr)
225
226/* set bits */
227#define CBDS_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) | (_sc))
228
229/* clear bits */
230#define CBDC_SC(_cbd, _sc) CBDW_SC(_cbd, CBDR_SC(_cbd) & ~(_sc))
231
232/*******************************************************************/
233
234extern const struct fs_ops fs_fec_ops;
235extern const struct fs_ops fs_fcc_ops;
236extern const struct fs_ops fs_scc_ops;
237
238/*******************************************************************/
239
240/* handy pointer to the immap */
241extern void *fs_enet_immap;
242
243/*******************************************************************/
244
245#endif
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
new file mode 100644
index 000000000000..a940b96433c7
--- /dev/null
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -0,0 +1,578 @@
1/*
2 * FCC driver for Motorola MPC82xx (PQ2).
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/immap_cpm2.h>
39#include <asm/mpc8260.h>
40#include <asm/cpm2.h>
41
42#include <asm/pgtable.h>
43#include <asm/irq.h>
44#include <asm/uaccess.h>
45
46#include "fs_enet.h"
47
48/*************************************************/
49
50/* FCC access macros */
51
52#define __fcc_out32(addr, x) out_be32((unsigned *)addr, x)
53#define __fcc_out16(addr, x) out_be16((unsigned short *)addr, x)
54#define __fcc_out8(addr, x) out_8((unsigned char *)addr, x)
55#define __fcc_in32(addr) in_be32((unsigned *)addr)
56#define __fcc_in16(addr) in_be16((unsigned short *)addr)
57#define __fcc_in8(addr) in_8((unsigned char *)addr)
58
59/* parameter space */
60
61/* write, read, set bits, clear bits */
62#define W32(_p, _m, _v) __fcc_out32(&(_p)->_m, (_v))
63#define R32(_p, _m) __fcc_in32(&(_p)->_m)
64#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
65#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
66
67#define W16(_p, _m, _v) __fcc_out16(&(_p)->_m, (_v))
68#define R16(_p, _m) __fcc_in16(&(_p)->_m)
69#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
70#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
71
72#define W8(_p, _m, _v) __fcc_out8(&(_p)->_m, (_v))
73#define R8(_p, _m) __fcc_in8(&(_p)->_m)
74#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
75#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
76
77/*************************************************/
78
79#define FCC_MAX_MULTICAST_ADDRS 64
80
81#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
82#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
83#define mk_mii_end 0
84
85#define MAX_CR_CMD_LOOPS 10000
86
/*
 * Issue a CPM command (op) for this FCC's page/block and busy-wait,
 * bounded by MAX_CR_CMD_LOOPS, until the CPM clears the FLG bit.
 * Returns 0 on success, 1 on timeout.
 */
static inline int fcc_cr_cmd(struct fs_enet_private *fep, u32 mcn, u32 op)
{
	const struct fs_platform_info *fpi = fep->fpi;

	cpm2_map_t *immap = fs_enet_immap;
	cpm_cpm2_t *cpmp = &immap->im_cpm;
	u32 v;
	int i;

	/* Currently I don't know what feature call will look like. But
	   I guess there'd be something like do_cpm_cmd() which will require page & sblock */
	v = mk_cr_cmd(fpi->cp_page, fpi->cp_block, mcn, op);
	/* setting CPM_CR_FLG hands the command to the CPM; it clears
	   the flag when done */
	W32(cpmp, cp_cpcr, v | CPM_CR_FLG);
	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
		if ((R32(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
			break;

	if (i >= MAX_CR_CMD_LOOPS) {
		printk(KERN_ERR "%s(): Not able to issue CPM command\n",
		       __FUNCTION__);
		return 1;
	}

	return 0;
}
112
113static int do_pd_setup(struct fs_enet_private *fep)
114{
115 struct platform_device *pdev = to_platform_device(fep->dev);
116 struct resource *r;
117
118 /* Fill out IRQ field */
119 fep->interrupt = platform_get_irq(pdev, 0);
120
121 /* Attach the memory for the FCC Parameter RAM */
122 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
123 fep->fcc.ep = (void *)r->start;
124
125 if (fep->fcc.ep == NULL)
126 return -EINVAL;
127
128 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
129 fep->fcc.fccp = (void *)r->start;
130
131 if (fep->fcc.fccp == NULL)
132 return -EINVAL;
133
134 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
135
136 if (fep->fcc.fcccp == NULL)
137 return -EINVAL;
138
139 return 0;
140}
141
142#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
143#define FCC_RX_EVENT (FCC_ENET_RXF)
144#define FCC_TX_EVENT (FCC_ENET_TXB)
145#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY)
146
/*
 * Per-device setup: derive the FCC index from the platform info,
 * fetch platform resources, and select the FCC event masks.
 * Returns 0 on success, -EINVAL on a bad index or resource failure.
 */
static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	/* unsigned cast also rejects a negative index from fs_get_fcc_index */
	fep->fcc.idx = fs_get_fcc_index(fpi->fs_no);
	if ((unsigned int)fep->fcc.idx >= 3)	/* max 3 FCCs */
		return -EINVAL;

	fep->fcc.mem = (void *)fpi->mem_offset;

	if (do_pd_setup(fep) != 0)
		return -EINVAL;

	fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
	fep->ev_rx = FCC_RX_EVENT;
	fep->ev_tx = FCC_TX_EVENT;
	fep->ev_err = FCC_ERR_EVENT_MSK;

	return 0;
}
168
/*
 * Allocate one DMA-coherent region holding both the RX and the TX
 * buffer-descriptor rings (RX ring first, TX ring right after it --
 * see restart(), which computes tx_bd_base_phys from that layout).
 */
static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_base = dma_alloc_coherent(fep->dev,
					    (fpi->tx_ring + fpi->rx_ring) *
					    sizeof(cbd_t), &fep->ring_mem_addr,
					    GFP_KERNEL);
	if (fep->ring_base == NULL)
		return -ENOMEM;

	return 0;
}
183
184static void free_bd(struct net_device *dev)
185{
186 struct fs_enet_private *fep = netdev_priv(dev);
187 const struct fs_platform_info *fpi = fep->fpi;
188
189 if (fep->ring_base)
190 dma_free_coherent(fep->dev,
191 (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
192 fep->ring_base, fep->ring_mem_addr);
193}
194
/* No per-device state to undo for FCC; hook kept for the fs_ops table. */
static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}
199
/* Enable promiscuous reception by setting PRO in the FCC PSMR. */
static void set_promiscuous_mode(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t *fccp = fep->fcc.fccp;

	S32(fccp, fcc_fpsmr, FCC_PSMR_PRO);
}
207
/* Clear the group-address hash before re-adding the multicast list. */
static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_enet_t *ep = fep->fcc.ep;

	W32(ep, fen_gaddrh, 0);
	W32(ep, fen_gaddrl, 0);
}
216
217static void set_multicast_one(struct net_device *dev, const u8 *mac)
218{
219 struct fs_enet_private *fep = netdev_priv(dev);
220 fcc_enet_t *ep = fep->fcc.ep;
221 u16 taddrh, taddrm, taddrl;
222
223 taddrh = ((u16)mac[5] << 8) | mac[4];
224 taddrm = ((u16)mac[3] << 8) | mac[2];
225 taddrl = ((u16)mac[1] << 8) | mac[0];
226
227 W16(ep, fen_taddrh, taddrh);
228 W16(ep, fen_taddrm, taddrm);
229 W16(ep, fen_taddrl, taddrl);
230 fcc_cr_cmd(fep, 0x0C, CPM_CR_SET_GADDR);
231}
232
/*
 * Finish a multicast-list update: drop promiscuous mode, fall back to
 * "accept all multicast" when the list is too long (or ALLMULTI is
 * set), and cache the final hash so restart() can reprogram it.
 */
static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t *fccp = fep->fcc.fccp;
	fcc_enet_t *ep = fep->fcc.ep;

	/* clear promiscuous always */
	C32(fccp, fcc_fpsmr, FCC_PSMR_PRO);

	/* if all multi or too many multicasts; just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {

		W32(ep, fen_gaddrh, 0xffffffff);
		W32(ep, fen_gaddrl, 0xffffffff);
	}

	/* read back */
	fep->fcc.gaddrh = R32(ep, fen_gaddrh);
	fep->fcc.gaddrl = R32(ep, fen_gaddrl);
}
254
255static void set_multicast_list(struct net_device *dev)
256{
257 struct dev_mc_list *pmc;
258
259 if ((dev->flags & IFF_PROMISC) == 0) {
260 set_multicast_start(dev);
261 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
262 set_multicast_one(dev, pmc->dmi_addr);
263 set_multicast_finish(dev);
264 } else
265 set_promiscuous_mode(dev);
266}
267
268static void restart(struct net_device *dev)
269{
270 struct fs_enet_private *fep = netdev_priv(dev);
271 const struct fs_platform_info *fpi = fep->fpi;
272 fcc_t *fccp = fep->fcc.fccp;
273 fcc_c_t *fcccp = fep->fcc.fcccp;
274 fcc_enet_t *ep = fep->fcc.ep;
275 dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
276 u16 paddrh, paddrm, paddrl;
277 u16 mem_addr;
278 const unsigned char *mac;
279 int i;
280
281 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
282
283 /* clear everything (slow & steady does it) */
284 for (i = 0; i < sizeof(*ep); i++)
285 __fcc_out8((char *)ep + i, 0);
286
287 /* get physical address */
288 rx_bd_base_phys = fep->ring_mem_addr;
289 tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;
290
291 /* point to bds */
292 W32(ep, fen_genfcc.fcc_rbase, rx_bd_base_phys);
293 W32(ep, fen_genfcc.fcc_tbase, tx_bd_base_phys);
294
295 /* Set maximum bytes per receive buffer.
296 * It must be a multiple of 32.
297 */
298 W16(ep, fen_genfcc.fcc_mrblr, PKT_MAXBLR_SIZE);
299
300 W32(ep, fen_genfcc.fcc_rstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
301 W32(ep, fen_genfcc.fcc_tstate, (CPMFCR_GBL | CPMFCR_EB) << 24);
302
303 /* Allocate space in the reserved FCC area of DPRAM for the
304 * internal buffers. No one uses this space (yet), so we
305 * can do this. Later, we will add resource management for
306 * this area.
307 */
308
309 mem_addr = (u32) fep->fcc.mem; /* de-fixup dpram offset */
310
311 W16(ep, fen_genfcc.fcc_riptr, (mem_addr & 0xffff));
312 W16(ep, fen_genfcc.fcc_tiptr, ((mem_addr + 32) & 0xffff));
313 W16(ep, fen_padptr, mem_addr + 64);
314
315 /* fill with special symbol... */
316 memset(fep->fcc.mem + fpi->dpram_offset + 64, 0x88, 32);
317
318 W32(ep, fen_genfcc.fcc_rbptr, 0);
319 W32(ep, fen_genfcc.fcc_tbptr, 0);
320 W32(ep, fen_genfcc.fcc_rcrc, 0);
321 W32(ep, fen_genfcc.fcc_tcrc, 0);
322 W16(ep, fen_genfcc.fcc_res1, 0);
323 W32(ep, fen_genfcc.fcc_res2, 0);
324
325 /* no CAM */
326 W32(ep, fen_camptr, 0);
327
328 /* Set CRC preset and mask */
329 W32(ep, fen_cmask, 0xdebb20e3);
330 W32(ep, fen_cpres, 0xffffffff);
331
332 W32(ep, fen_crcec, 0); /* CRC Error counter */
333 W32(ep, fen_alec, 0); /* alignment error counter */
334 W32(ep, fen_disfc, 0); /* discard frame counter */
335 W16(ep, fen_retlim, 15); /* Retry limit threshold */
336 W16(ep, fen_pper, 0); /* Normal persistence */
337
338 /* set group address */
339 W32(ep, fen_gaddrh, fep->fcc.gaddrh);
340 W32(ep, fen_gaddrl, fep->fcc.gaddrh);
341
342 /* Clear hash filter tables */
343 W32(ep, fen_iaddrh, 0);
344 W32(ep, fen_iaddrl, 0);
345
346 /* Clear the Out-of-sequence TxBD */
347 W16(ep, fen_tfcstat, 0);
348 W16(ep, fen_tfclen, 0);
349 W32(ep, fen_tfcptr, 0);
350
351 W16(ep, fen_mflr, PKT_MAXBUF_SIZE); /* maximum frame length register */
352 W16(ep, fen_minflr, PKT_MINBUF_SIZE); /* minimum frame length register */
353
354 /* set address */
355 mac = dev->dev_addr;
356 paddrh = ((u16)mac[5] << 8) | mac[4];
357 paddrm = ((u16)mac[3] << 8) | mac[2];
358 paddrl = ((u16)mac[1] << 8) | mac[0];
359
360 W16(ep, fen_paddrh, paddrh);
361 W16(ep, fen_paddrm, paddrm);
362 W16(ep, fen_paddrl, paddrl);
363
364 W16(ep, fen_taddrh, 0);
365 W16(ep, fen_taddrm, 0);
366 W16(ep, fen_taddrl, 0);
367
368 W16(ep, fen_maxd1, 1520); /* maximum DMA1 length */
369 W16(ep, fen_maxd2, 1520); /* maximum DMA2 length */
370
371 /* Clear stat counters, in case we ever enable RMON */
372 W32(ep, fen_octc, 0);
373 W32(ep, fen_colc, 0);
374 W32(ep, fen_broc, 0);
375 W32(ep, fen_mulc, 0);
376 W32(ep, fen_uspc, 0);
377 W32(ep, fen_frgc, 0);
378 W32(ep, fen_ospc, 0);
379 W32(ep, fen_jbrc, 0);
380 W32(ep, fen_p64c, 0);
381 W32(ep, fen_p65c, 0);
382 W32(ep, fen_p128c, 0);
383 W32(ep, fen_p256c, 0);
384 W32(ep, fen_p512c, 0);
385 W32(ep, fen_p1024c, 0);
386
387 W16(ep, fen_rfthr, 0); /* Suggested by manual */
388 W16(ep, fen_rfcnt, 0);
389 W16(ep, fen_cftype, 0);
390
391 fs_init_bds(dev);
392
393 /* adjust to speed (for RMII mode) */
394 if (fpi->use_rmii) {
395 if (fep->speed == 100)
396 C8(fcccp, fcc_gfemr, 0x20);
397 else
398 S8(fcccp, fcc_gfemr, 0x20);
399 }
400
401 fcc_cr_cmd(fep, 0x0c, CPM_CR_INIT_TRX);
402
403 /* clear events */
404 W16(fccp, fcc_fcce, 0xffff);
405
406 /* Enable interrupts we wish to service */
407 W16(fccp, fcc_fccm, FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
408
409 /* Set GFMR to enable Ethernet operating mode */
410 W32(fccp, fcc_gfmr, FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
411
412 /* set sync/delimiters */
413 W16(fccp, fcc_fdsr, 0xd555);
414
415 W32(fccp, fcc_fpsmr, FCC_PSMR_ENCRC);
416
417 if (fpi->use_rmii)
418 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
419
420 /* adjust to duplex mode */
421 if (fep->duplex)
422 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
423 else
424 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
425
426 S32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
427}
428
429static void stop(struct net_device *dev)
430{
431 struct fs_enet_private *fep = netdev_priv(dev);
432 fcc_t *fccp = fep->fcc.fccp;
433
434 /* stop ethernet */
435 C32(fccp, fcc_gfmr, FCC_GFMR_ENR | FCC_GFMR_ENT);
436
437 /* clear events */
438 W16(fccp, fcc_fcce, 0xffff);
439
440 /* clear interrupt mask */
441 W16(fccp, fcc_fccm, 0);
442
443 fs_cleanup_bds(dev);
444}
445
/*
 * Hook invoked just before request_irq() for this device.
 * The FCC needs no interrupt-controller setup, so this is intentionally
 * a no-op (other MAC variants may program PIC bits here).
 */
static void pre_request_irq(struct net_device *dev, int irq)
{
	/* nothing */
}
450
/*
 * Hook invoked just after free_irq() for this device.
 * Nothing to undo for the FCC; intentionally empty.
 */
static void post_free_irq(struct net_device *dev, int irq)
{
	/* nothing */
}
455
456static void napi_clear_rx_event(struct net_device *dev)
457{
458 struct fs_enet_private *fep = netdev_priv(dev);
459 fcc_t *fccp = fep->fcc.fccp;
460
461 W16(fccp, fcc_fcce, FCC_NAPI_RX_EVENT_MSK);
462}
463
464static void napi_enable_rx(struct net_device *dev)
465{
466 struct fs_enet_private *fep = netdev_priv(dev);
467 fcc_t *fccp = fep->fcc.fccp;
468
469 S16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
470}
471
472static void napi_disable_rx(struct net_device *dev)
473{
474 struct fs_enet_private *fep = netdev_priv(dev);
475 fcc_t *fccp = fep->fcc.fccp;
476
477 C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
478}
479
/*
 * Called after RX buffer descriptors have been recycled back to the ring.
 * The FCC resumes reception on its own, so no register write is needed
 * (unlike the FEC variant, which must poke r_des_active here).
 */
static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}
484
/*
 * Called after new TX buffer descriptors have been queued.
 * The FCC polls the TX ring by itself, so no kick is required
 * (the FEC variant writes x_des_active here instead).
 */
static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}
489
490static u32 get_int_events(struct net_device *dev)
491{
492 struct fs_enet_private *fep = netdev_priv(dev);
493 fcc_t *fccp = fep->fcc.fccp;
494
495 return (u32)R16(fccp, fcc_fcce);
496}
497
498static void clear_int_events(struct net_device *dev, u32 int_events)
499{
500 struct fs_enet_private *fep = netdev_priv(dev);
501 fcc_t *fccp = fep->fcc.fccp;
502
503 W16(fccp, fcc_fcce, int_events & 0xffff);
504}
505
/* Log error events (FCC_ENET_TXE etc.) reported by the interrupt handler. */
static void ev_error(struct net_device *dev, u32 int_events)
{
	printk(KERN_WARNING DRV_MODULE_NAME
	       ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events);
}
511
512int get_regs(struct net_device *dev, void *p, int *sizep)
513{
514 struct fs_enet_private *fep = netdev_priv(dev);
515
516 if (*sizep < sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t))
517 return -EINVAL;
518
519 memcpy_fromio(p, fep->fcc.fccp, sizeof(fcc_t));
520 p = (char *)p + sizeof(fcc_t);
521
522 memcpy_fromio(p, fep->fcc.fcccp, sizeof(fcc_c_t));
523 p = (char *)p + sizeof(fcc_c_t);
524
525 memcpy_fromio(p, fep->fcc.ep, sizeof(fcc_enet_t));
526
527 return 0;
528}
529
/* ethtool: size of the register dump produced by get_regs() above. */
int get_regs_len(struct net_device *dev)
{
	return sizeof(fcc_t) + sizeof(fcc_c_t) + sizeof(fcc_enet_t);
}
534
/* Some transmit errors cause the transmitter to shut
 * down.  We now issue a restart transmit.  Since the
 * errors close the BD and update the pointers, the restart
 * _should_ pick up without having to reset any of our
 * pointers either.  Also, to work around 8260 device erratum
 * CPM37, we must disable and then re-enable the transmitter
 * following a Late Collision, Underrun, or Retry Limit error.
 */
void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fcc_t *fccp = fep->fcc.fccp;

	/* CPM37 workaround: bounce GFMR.ENT around the restart command */
	C32(fccp, fcc_gfmr, FCC_GFMR_ENT);
	udelay(10);
	S32(fccp, fcc_gfmr, FCC_GFMR_ENT);

	/* 0x0C is presumably the FCC page/channel code for the CPM
	 * command (restart() uses the same 0x0c) — see fcc_cr_cmd() */
	fcc_cr_cmd(fep, 0x0C, CPM_CR_RESTART_TX);
}
554
555/*************************************************************************/
556
/* Operations vector handed to the generic fs_enet core for FCC MACs. */
const struct fs_ops fs_fcc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.pre_request_irq	= pre_request_irq,
	.post_free_irq		= post_free_irq,
	.napi_clear_rx_event	= napi_clear_rx_event,
	.napi_enable_rx		= napi_enable_rx,
	.napi_disable_rx	= napi_disable_rx,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
new file mode 100644
index 000000000000..5ef4e845a387
--- /dev/null
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -0,0 +1,653 @@
1/*
2 * Freescale Ethernet controllers
3 *
4 * Copyright (c) 2005 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#ifdef CONFIG_8xx
42#include <asm/8xx_immap.h>
43#include <asm/pgtable.h>
44#include <asm/mpc8xx.h>
45#include <asm/commproc.h>
46#endif
47
48#include "fs_enet.h"
49
50/*************************************************/
51
52#if defined(CONFIG_CPM1)
53/* for a CPM1 __raw_xxx's are sufficient */
54#define __fs_out32(addr, x) __raw_writel(x, addr)
55#define __fs_out16(addr, x) __raw_writew(x, addr)
56#define __fs_in32(addr) __raw_readl(addr)
57#define __fs_in16(addr) __raw_readw(addr)
58#else
59/* for others play it safe */
60#define __fs_out32(addr, x) out_be32(addr, x)
61#define __fs_out16(addr, x) out_be16(addr, x)
62#define __fs_in32(addr) in_be32(addr)
63#define __fs_in16(addr) in_be16(addr)
64#endif
65
66/* write */
67#define FW(_fecp, _reg, _v) __fs_out32(&(_fecp)->fec_ ## _reg, (_v))
68
69/* read */
70#define FR(_fecp, _reg) __fs_in32(&(_fecp)->fec_ ## _reg)
71
72/* set bits */
73#define FS(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) | (_v))
74
75/* clear bits */
76#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
77
78
79/* CRC polynomium used by the FEC for the multicast group filtering */
80#define FEC_CRC_POLY 0x04C11DB7
81
82#define FEC_MAX_MULTICAST_ADDRS 64
83
84/* Interrupt events/masks.
85*/
86#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
87#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
88#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
89#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
90#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
91#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
92#define FEC_ENET_RXF 0x02000000U /* Full frame received */
93#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
94#define FEC_ENET_MII 0x00800000U /* MII interrupt */
95#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
96
97#define FEC_ECNTRL_PINMUX 0x00000004
98#define FEC_ECNTRL_ETHER_EN 0x00000002
99#define FEC_ECNTRL_RESET 0x00000001
100
101#define FEC_RCNTRL_BC_REJ 0x00000010
102#define FEC_RCNTRL_PROM 0x00000008
103#define FEC_RCNTRL_MII_MODE 0x00000004
104#define FEC_RCNTRL_DRT 0x00000002
105#define FEC_RCNTRL_LOOP 0x00000001
106
107#define FEC_TCNTRL_FDEN 0x00000004
108#define FEC_TCNTRL_HBC 0x00000002
109#define FEC_TCNTRL_GTS 0x00000001
110
111
112/* Make MII read/write commands for the FEC.
113*/
114#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
115#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
116#define mk_mii_end 0
117
118#define FEC_MII_LOOPS 10000
119
120/*
121 * Delay to wait for FEC reset command to complete (in us)
122 */
123#define FEC_RESET_DELAY 50
124
125static int whack_reset(fec_t * fecp)
126{
127 int i;
128
129 FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET);
130 for (i = 0; i < FEC_RESET_DELAY; i++) {
131 if ((FR(fecp, ecntrl) & FEC_ECNTRL_RESET) == 0)
132 return 0; /* OK */
133 udelay(1);
134 }
135
136 return -1;
137}
138
139static int do_pd_setup(struct fs_enet_private *fep)
140{
141 struct platform_device *pdev = to_platform_device(fep->dev);
142 struct resource *r;
143
144 /* Fill out IRQ field */
145 fep->interrupt = platform_get_irq_byname(pdev,"interrupt");
146
147 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
148 fep->fec.fecp =(void*)r->start;
149
150 if(fep->fec.fecp == NULL)
151 return -EINVAL;
152
153 return 0;
154
155}
156
157#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
158#define FEC_RX_EVENT (FEC_ENET_RXF)
159#define FEC_TX_EVENT (FEC_ENET_TXF)
160#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
161 FEC_ENET_BABT | FEC_ENET_EBERR)
162
/*
 * Per-device software setup: resolve platform resources and advertise
 * which event bits mean RX/TX/error to the generic fs_enet core.
 * Returns 0 on success, -EINVAL if platform resources are missing.
 */
static int setup_data(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (do_pd_setup(fep) != 0)
		return -EINVAL;

	/* start with an empty multicast hash filter */
	fep->fec.hthi = 0;
	fep->fec.htlo = 0;

	/* event masks consumed by the shared interrupt/NAPI code */
	fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
	fep->ev_rx = FEC_RX_EVENT;
	fep->ev_tx = FEC_TX_EVENT;
	fep->ev_err = FEC_ERR_EVENT_MSK;

	return 0;
}
180
181static int allocate_bd(struct net_device *dev)
182{
183 struct fs_enet_private *fep = netdev_priv(dev);
184 const struct fs_platform_info *fpi = fep->fpi;
185
186 fep->ring_base = dma_alloc_coherent(fep->dev,
187 (fpi->tx_ring + fpi->rx_ring) *
188 sizeof(cbd_t), &fep->ring_mem_addr,
189 GFP_KERNEL);
190 if (fep->ring_base == NULL)
191 return -ENOMEM;
192
193 return 0;
194}
195
196static void free_bd(struct net_device *dev)
197{
198 struct fs_enet_private *fep = netdev_priv(dev);
199 const struct fs_platform_info *fpi = fep->fpi;
200
201 if(fep->ring_base)
202 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring)
203 * sizeof(cbd_t),
204 fep->ring_base,
205 fep->ring_mem_addr);
206}
207
/* Counterpart of setup_data(); nothing was allocated there, so no-op. */
static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}
212
213static void set_promiscuous_mode(struct net_device *dev)
214{
215 struct fs_enet_private *fep = netdev_priv(dev);
216 fec_t *fecp = fep->fec.fecp;
217
218 FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
219}
220
221static void set_multicast_start(struct net_device *dev)
222{
223 struct fs_enet_private *fep = netdev_priv(dev);
224
225 fep->fec.hthi = 0;
226 fep->fec.htlo = 0;
227}
228
/*
 * Fold one multicast MAC address into the software copy of the 64-bit
 * hash filter (fec.hthi/fec.htlo); the registers are written later by
 * set_multicast_finish().
 *
 * A bitwise CRC-32 over the 6 address bytes (LSB-first per byte,
 * polynomial FEC_CRC_POLY) is computed, then 5 of the CRC's low 6 bits
 * are bit-reversed to form the index of the hash bit to set; the
 * remaining CRC bit selects the high or low 32-bit half.
 * NOTE(review): this swizzle presumably matches the FEC's hardware hash
 * function — confirm against the 8xx FEC documentation.
 */
static void set_multicast_one(struct net_device *dev, const u8 *mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int temp, hash_index, i, j;
	u32 crc, csrVal;
	u8 byte, msb;

	/* bitwise CRC-32, feeding each byte LSB first */
	crc = 0xffffffff;
	for (i = 0; i < 6; i++) {
		byte = mac[i];
		for (j = 0; j < 8; j++) {
			msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (byte & 0x1))
				crc ^= FEC_CRC_POLY;
			byte >>= 1;
		}
	}

	/* bit-reverse 5 bits of the CRC to get the hash bit index */
	temp = (crc & 0x3f) >> 1;
	hash_index = ((temp & 0x01) << 4) |
		     ((temp & 0x02) << 2) |
		     ((temp & 0x04)) |
		     ((temp & 0x08) >> 2) |
		     ((temp & 0x10) >> 4);
	csrVal = 1 << hash_index;
	/* CRC bit 0 picks which 32-bit half of the filter gets the bit */
	if (crc & 1)
		fep->fec.hthi |= csrVal;
	else
		fep->fec.htlo |= csrVal;
}
260
261static void set_multicast_finish(struct net_device *dev)
262{
263 struct fs_enet_private *fep = netdev_priv(dev);
264 fec_t *fecp = fep->fec.fecp;
265
266 /* if all multi or too many multicasts; just enable all */
267 if ((dev->flags & IFF_ALLMULTI) != 0 ||
268 dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
269 fep->fec.hthi = 0xffffffffU;
270 fep->fec.htlo = 0xffffffffU;
271 }
272
273 FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
274 FW(fecp, hash_table_high, fep->fec.hthi);
275 FW(fecp, hash_table_low, fep->fec.htlo);
276}
277
278static void set_multicast_list(struct net_device *dev)
279{
280 struct dev_mc_list *pmc;
281
282 if ((dev->flags & IFF_PROMISC) == 0) {
283 set_multicast_start(dev);
284 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
285 set_multicast_one(dev, pmc->dmi_addr);
286 set_multicast_finish(dev);
287 } else
288 set_promiscuous_mode(dev);
289}
290
/*
 * Full (re)initialization of the FEC: hard reset, reprogram every
 * register, rebuild the BD rings, then enable RX/TX.  Called on open
 * and on link parameter changes (speed/duplex).
 */
static void restart(struct net_device *dev)
{
#ifdef CONFIG_DUET
	immap_t *immap = fs_enet_immap;
	u32 cptr;
#endif
	struct fs_enet_private *fep = netdev_priv(dev);
	fec_t *fecp = fep->fec.fecp;
	const struct fs_platform_info *fpi = fep->fpi;
	dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
	int r;
	u32 addrhi, addrlo;

	r = whack_reset(fep->fec.fecp);
	if (r != 0)
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s FEC Reset FAILED!\n", dev->name);

	/*
	 * Set station address.
	 * NOTE(review): addrhi (MAC bytes 0-3) goes into addr_low and
	 * addrlo (bytes 4-5) into addr_high — presumably this matches
	 * the FEC's PALR/PAUR register layout; confirm against the
	 * 8xx reference manual before "fixing".
	 */
	addrhi = ((u32) dev->dev_addr[0] << 24) |
		 ((u32) dev->dev_addr[1] << 16) |
		 ((u32) dev->dev_addr[2] << 8) |
		 (u32) dev->dev_addr[3];
	addrlo = ((u32) dev->dev_addr[4] << 24) |
		 ((u32) dev->dev_addr[5] << 16);
	FW(fecp, addr_low, addrhi);
	FW(fecp, addr_high, addrlo);

	/*
	 * Reset all multicast (re-load the software copy of the hash).
	 */
	FW(fecp, hash_table_high, fep->fec.hthi);
	FW(fecp, hash_table_low, fep->fec.htlo);

	/*
	 * Set maximum receive buffer size.
	 * NOTE(review): r_hash is written with the max frame size —
	 * presumably this register doubles as the frame-length limit on
	 * this FEC revision; verify.
	 */
	FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
	FW(fecp, r_hash, PKT_MAXBUF_SIZE);

	/* get physical address: RX ring first, TX ring right behind it */
	rx_bd_base_phys = fep->ring_mem_addr;
	tx_bd_base_phys = rx_bd_base_phys + sizeof(cbd_t) * fpi->rx_ring;

	/*
	 * Set receive and transmit descriptor base.
	 */
	FW(fecp, r_des_start, rx_bd_base_phys);
	FW(fecp, x_des_start, tx_bd_base_phys);

	fs_init_bds(dev);

	/*
	 * Enable big endian and don't care about SDMA FC.
	 */
	FW(fecp, fun_code, 0x78000000);

	/*
	 * Set MII speed (divider computed at MII-bus init time).
	 */
	FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed);

	/*
	 * Clear any outstanding interrupt and route the interrupt vector.
	 */
	FW(fecp, ievent, 0xffc0);
	FW(fecp, ivec, (fep->interrupt / 2) << 29);


	/*
	 * adjust to speed (only for DUET & RMII): twiddle the per-FEC
	 * RMII speed bits in the CPM's CPTR register.
	 */
#ifdef CONFIG_DUET
	if (fpi->use_rmii) {
		cptr = in_be32(&immap->im_cpm.cp_cptr);
		switch (fs_get_fec_index(fpi->fs_no)) {
		case 0:
			cptr |= 0x100;
			if (fep->speed == 10)
				cptr |= 0x0000010;
			else if (fep->speed == 100)
				cptr &= ~0x0000010;
			break;
		case 1:
			cptr |= 0x80;
			if (fep->speed == 10)
				cptr |= 0x0000008;
			else if (fep->speed == 100)
				cptr &= ~0x0000008;
			break;
		default:
			BUG();	/* should never happen */
			break;
		}
		out_be32(&immap->im_cpm.cp_cptr, cptr);
	}
#endif

	FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
	/*
	 * adjust to duplex mode: full duplex disables RX-during-TX and
	 * enables the FD transmit control bit, half duplex the reverse.
	 */
	if (fep->duplex) {
		FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FS(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD enable */
	} else {
		FS(fecp, r_cntrl, FEC_RCNTRL_DRT);
		FC(fecp, x_cntrl, FEC_TCNTRL_FDEN);	/* FD disable */
	}

	/*
	 * Enable interrupts we wish to service.
	 */
	FW(fecp, imask, FEC_ENET_TXF | FEC_ENET_TXB |
	   FEC_ENET_RXF | FEC_ENET_RXB);

	/*
	 * And last, enable the transmit and receive processing,
	 * then tell the FEC the RX ring has descriptors available.
	 */
	FW(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
	FW(fecp, r_des_active, 0x01000000);
}
415
/*
 * Stop the FEC: gracefully halt transmission, mask interrupts, disable
 * the controller and tear down the BD rings.  If FEC1 also carries the
 * shared MII bus, re-enable just enough of it for MDIO to keep working.
 */
static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fec_t *fecp = fep->fec.fecp;
	struct fs_enet_mii_bus *bus = fep->mii_bus;
	const struct fs_mii_bus_info *bi = bus->bus_info;
	int i;

	if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
		return;		/* already down */

	/* request a graceful stop and poll GRA completion (bounded) */
	FW(fecp, x_cntrl, 0x01);	/* Graceful transmit stop */
	for (i = 0; ((FR(fecp, ievent) & 0x10000000) == 0) &&
	     i < FEC_RESET_DELAY; i++)
		udelay(1);

	if (i == FEC_RESET_DELAY)
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s FEC timeout on graceful transmit stop\n",
		       dev->name);
	/*
	 * Disable FEC. Let only MII interrupts.
	 */
	FW(fecp, imask, 0);
	FC(fecp, ecntrl, FEC_ECNTRL_ETHER_EN);

	fs_cleanup_bds(dev);

	/* shut down FEC1? that's where the mii bus is */
	if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) {
		/* keep the controller minimally alive for MDIO traffic */
		FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
		FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
		FW(fecp, ievent, FEC_ENET_MII);
		FW(fecp, mii_speed, bus->fec.mii_speed);
	}
}
452
/*
 * Hook invoked just before request_irq(): program the SIU edge/level
 * register (SIEL) for this interrupt line.
 * NOTE(review): even IRQ numbers appear to be set edge-sensitive and
 * odd ones level-sensitive (clearing the paired edge bit) — confirm
 * against the 8xx SIEL bit definitions.
 */
static void pre_request_irq(struct net_device *dev, int irq)
{
	immap_t *immap = fs_enet_immap;
	u32 siel;

	/* SIU interrupt */
	if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {

		siel = in_be32(&immap->im_siu_conf.sc_siel);
		if ((irq & 1) == 0)
			siel |= (0x80000000 >> irq);
		else
			siel &= ~(0x80000000 >> (irq & ~1));
		out_be32(&immap->im_siu_conf.sc_siel, siel);
	}
}
469
/*
 * Hook invoked just after free_irq(); the SIEL setup done in
 * pre_request_irq() needs no undoing, so this is intentionally empty.
 */
static void post_free_irq(struct net_device *dev, int irq)
{
	/* nothing */
}
474
475static void napi_clear_rx_event(struct net_device *dev)
476{
477 struct fs_enet_private *fep = netdev_priv(dev);
478 fec_t *fecp = fep->fec.fecp;
479
480 FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
481}
482
483static void napi_enable_rx(struct net_device *dev)
484{
485 struct fs_enet_private *fep = netdev_priv(dev);
486 fec_t *fecp = fep->fec.fecp;
487
488 FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
489}
490
491static void napi_disable_rx(struct net_device *dev)
492{
493 struct fs_enet_private *fep = netdev_priv(dev);
494 fec_t *fecp = fep->fec.fecp;
495
496 FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
497}
498
499static void rx_bd_done(struct net_device *dev)
500{
501 struct fs_enet_private *fep = netdev_priv(dev);
502 fec_t *fecp = fep->fec.fecp;
503
504 FW(fecp, r_des_active, 0x01000000);
505}
506
507static void tx_kickstart(struct net_device *dev)
508{
509 struct fs_enet_private *fep = netdev_priv(dev);
510 fec_t *fecp = fep->fec.fecp;
511
512 FW(fecp, x_des_active, 0x01000000);
513}
514
515static u32 get_int_events(struct net_device *dev)
516{
517 struct fs_enet_private *fep = netdev_priv(dev);
518 fec_t *fecp = fep->fec.fecp;
519
520 return FR(fecp, ievent) & FR(fecp, imask);
521}
522
523static void clear_int_events(struct net_device *dev, u32 int_events)
524{
525 struct fs_enet_private *fep = netdev_priv(dev);
526 fec_t *fecp = fep->fec.fecp;
527
528 FW(fecp, ievent, int_events);
529}
530
/* Log error events (heartbeat/babble/bus error) from the IRQ handler. */
static void ev_error(struct net_device *dev, u32 int_events)
{
	printk(KERN_WARNING DRV_MODULE_NAME
	       ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events);
}
536
537int get_regs(struct net_device *dev, void *p, int *sizep)
538{
539 struct fs_enet_private *fep = netdev_priv(dev);
540
541 if (*sizep < sizeof(fec_t))
542 return -EINVAL;
543
544 memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t));
545
546 return 0;
547}
548
/* ethtool: size of the register dump produced by get_regs() above. */
int get_regs_len(struct net_device *dev)
{
	return sizeof(fec_t);
}
553
/*
 * Restart transmission after a fatal TX error.  No action is needed for
 * the FEC (the FCC variant bounces the transmitter here for erratum
 * CPM37); presumably TX resumes when x_des_active is next written.
 */
void tx_restart(struct net_device *dev)
{
	/* nothing */
}
558
559/*************************************************************************/
560
/* Operations vector handed to the generic fs_enet core for FEC MACs. */
const struct fs_ops fs_fec_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.pre_request_irq	= pre_request_irq,
	.post_free_irq		= post_free_irq,
	.napi_clear_rx_event	= napi_clear_rx_event,
	.napi_enable_rx		= napi_enable_rx,
	.napi_disable_rx	= napi_disable_rx,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};
583
584/***********************************************************************/
585
/*
 * Read a PHY register over the FEC's MDIO interface.
 * Busy-waits (bounded by FEC_MII_LOOPS) for the MII-complete event,
 * acks it, and returns the 16-bit value; returns -1 on timeout.
 * MII mode must already be enabled in r_cntrl, otherwise BUG().
 * NOTE(review): a stale FEC_ENET_MII event left set by a previous
 * transfer would end the poll early — presumably callers serialize
 * MDIO accesses so the event is always consumed; verify.
 */
static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
{
	fec_t *fecp = bus->fec.fecp;
	int i, ret = -1;

	if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
		BUG();

	/* Add PHY address to register command. */
	FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));

	for (i = 0; i < FEC_MII_LOOPS; i++)
		if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
			break;

	if (i < FEC_MII_LOOPS) {
		FW(fecp, ievent, FEC_ENET_MII);	/* ack the MII event */
		ret = FR(fecp, mii_data) & 0xffff;
	}

	return ret;
}
608
/*
 * Write a PHY register over the FEC's MDIO interface.
 * Busy-waits (bounded by FEC_MII_LOOPS) for the MII-complete event and
 * acks it; a timeout is silently ignored.
 * MII mode must already be enabled in r_cntrl, otherwise BUG().
 */
static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
{
	fec_t *fecp = bus->fec.fecp;
	int i;

	/* this must never happen */
	if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
		BUG();

	/* Add PHY address to register command. */
	FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));

	for (i = 0; i < FEC_MII_LOOPS; i++)
		if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
			break;

	if (i < FEC_MII_LOOPS)
		FW(fecp, ievent, FEC_ENET_MII);	/* ack the MII event */
}
628
/*
 * Bring up the FEC-based MDIO bus: enable MII mode on FEC0, derive the
 * MII clock divider from the board's internal bus frequency, and hook
 * up the read/write accessors.
 * Returns 0 on success, -1 if a bus other than id 0 is requested
 * (only FEC0 carries the MII management pins here).
 */
int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
{
	bd_t *bd = (bd_t *)__res;	/* board info from the bootloader */
	const struct fs_mii_bus_info *bi = bus->bus_info;
	fec_t *fecp;

	if (bi->id != 0)
		return -1;

	bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
	/* MII clock divider: round bus freq up to a 2.5 MHz MDC,
	 * halved and shifted into the mii_speed field format */
	bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
			      & 0x3F) << 1;

	fecp = bus->fec.fecp;

	FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);	/* MII enable */
	FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
	FW(fecp, ievent, FEC_ENET_MII);
	FW(fecp, mii_speed, bus->fec.mii_speed);

	bus->mii_read = mii_read;
	bus->mii_write = mii_write;

	return 0;
}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
new file mode 100644
index 000000000000..d8c6e9cadcf5
--- /dev/null
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -0,0 +1,524 @@
1/*
2 * Ethernet on Serial Communications Controller (SCC) driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36#include <linux/fs.h>
37
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#ifdef CONFIG_8xx
42#include <asm/8xx_immap.h>
43#include <asm/pgtable.h>
44#include <asm/mpc8xx.h>
45#include <asm/commproc.h>
46#endif
47
48#include "fs_enet.h"
49
50/*************************************************/
51
52#if defined(CONFIG_CPM1)
53/* for a 8xx __raw_xxx's are sufficient */
54#define __fs_out32(addr, x) __raw_writel(x, addr)
55#define __fs_out16(addr, x) __raw_writew(x, addr)
56#define __fs_out8(addr, x) __raw_writeb(x, addr)
57#define __fs_in32(addr) __raw_readl(addr)
58#define __fs_in16(addr) __raw_readw(addr)
59#define __fs_in8(addr) __raw_readb(addr)
60#else
61/* for others play it safe */
62#define __fs_out32(addr, x) out_be32(addr, x)
63#define __fs_out16(addr, x) out_be16(addr, x)
64#define __fs_in32(addr) in_be32(addr)
65#define __fs_in16(addr) in_be16(addr)
66#endif
67
68/* write, read, set bits, clear bits */
69#define W32(_p, _m, _v) __fs_out32(&(_p)->_m, (_v))
70#define R32(_p, _m) __fs_in32(&(_p)->_m)
71#define S32(_p, _m, _v) W32(_p, _m, R32(_p, _m) | (_v))
72#define C32(_p, _m, _v) W32(_p, _m, R32(_p, _m) & ~(_v))
73
74#define W16(_p, _m, _v) __fs_out16(&(_p)->_m, (_v))
75#define R16(_p, _m) __fs_in16(&(_p)->_m)
76#define S16(_p, _m, _v) W16(_p, _m, R16(_p, _m) | (_v))
77#define C16(_p, _m, _v) W16(_p, _m, R16(_p, _m) & ~(_v))
78
79#define W8(_p, _m, _v) __fs_out8(&(_p)->_m, (_v))
80#define R8(_p, _m) __fs_in8(&(_p)->_m)
81#define S8(_p, _m, _v) W8(_p, _m, R8(_p, _m) | (_v))
82#define C8(_p, _m, _v) W8(_p, _m, R8(_p, _m) & ~(_v))
83
84#define SCC_MAX_MULTICAST_ADDRS 64
85
86/*
87 * Delay to wait for SCC reset command to complete (in us)
88 */
89#define SCC_RESET_DELAY 50
90#define MAX_CR_CMD_LOOPS 10000
91
/*
 * Issue a command to the CPM command register for this SCC's channel
 * and busy-wait (bounded by MAX_CR_CMD_LOOPS) for the CPM to clear the
 * FLG (command pending) bit.  Returns 0 on success, 1 on timeout.
 */
static inline int scc_cr_cmd(struct fs_enet_private *fep, u32 op)
{
	cpm8xx_t *cpmp = &((immap_t *)fs_enet_immap)->im_cpm;
	u32 v, ch;
	int i = 0;

	ch = fep->scc.idx << 2;	/* channel number derived from SCC index */
	v = mk_cr_cmd(ch, op);
	W16(cpmp, cp_cpcr, v | CPM_CR_FLG);	/* FLG set = pending */
	for (i = 0; i < MAX_CR_CMD_LOOPS; i++)
		if ((R16(cpmp, cp_cpcr) & CPM_CR_FLG) == 0)
			break;

	if (i >= MAX_CR_CMD_LOOPS) {
		printk(KERN_ERR "%s(): Not able to issue CPM command\n",
		       __FUNCTION__);
		return 1;
	}
	return 0;
}
112
113static int do_pd_setup(struct fs_enet_private *fep)
114{
115 struct platform_device *pdev = to_platform_device(fep->dev);
116 struct resource *r;
117
118 /* Fill out IRQ field */
119 fep->interrupt = platform_get_irq_byname(pdev, "interrupt");
120
121 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
122 fep->scc.sccp = (void *)r->start;
123
124 if (fep->scc.sccp == NULL)
125 return -EINVAL;
126
127 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pram");
128 fep->scc.ep = (void *)r->start;
129
130 if (fep->scc.ep == NULL)
131 return -EINVAL;
132
133 return 0;
134}
135
136#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
137#define SCC_RX_EVENT (SCCE_ENET_RXF)
138#define SCC_TX_EVENT (SCCE_ENET_TXB)
139#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
140
141static int setup_data(struct net_device *dev)
142{
143 struct fs_enet_private *fep = netdev_priv(dev);
144 const struct fs_platform_info *fpi = fep->fpi;
145
146 fep->scc.idx = fs_get_scc_index(fpi->fs_no);
147 if ((unsigned int)fep->fcc.idx > 4) /* max 4 SCCs */
148 return -EINVAL;
149
150 do_pd_setup(fep);
151
152 fep->scc.hthi = 0;
153 fep->scc.htlo = 0;
154
155 fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
156 fep->ev_rx = SCC_RX_EVENT;
157 fep->ev_tx = SCC_TX_EVENT;
158 fep->ev_err = SCC_ERR_EVENT_MSK;
159
160 return 0;
161}
162
/*
 * Allocate the RX+TX buffer-descriptor rings in CPM dual-port RAM
 * (cpm_dpalloc rather than coherent DMA memory — presumably the SCC
 * can only fetch BDs from DPRAM), 8-byte aligned.
 * Returns 0 on success, -ENOMEM if DPRAM is exhausted.
 */
static int allocate_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;

	fep->ring_mem_addr = cpm_dpalloc((fpi->tx_ring + fpi->rx_ring) *
					 sizeof(cbd_t), 8);
	if (IS_DPERR(fep->ring_mem_addr))
		return -ENOMEM;

	/* keep a CPU-visible pointer alongside the DPRAM offset */
	fep->ring_base = cpm_dpram_addr(fep->ring_mem_addr);

	return 0;
}
177
/* Release the DPRAM BD rings allocated by allocate_bd(), if any. */
static void free_bd(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	if (fep->ring_base)
		cpm_dpfree(fep->ring_mem_addr);
}
185
/* Counterpart of setup_data(); nothing was allocated there, so no-op. */
static void cleanup_data(struct net_device *dev)
{
	/* nothing */
}
190
191static void set_promiscuous_mode(struct net_device *dev)
192{
193 struct fs_enet_private *fep = netdev_priv(dev);
194 scc_t *sccp = fep->scc.sccp;
195
196 S16(sccp, scc_psmr, SCC_PSMR_PRO);
197}
198
/*
 * Begin rebuilding the multicast filter: zero the four 16-bit group
 * address (hash) registers in the SCC parameter RAM.
 */
static void set_multicast_start(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t *ep = fep->scc.ep;

	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
}
209
/*
 * Add one multicast address to the hardware filter: load the address
 * (byte-swapped into 16-bit halves) into the temporary address
 * registers, then issue the CPM SET_GADDR command, which folds it into
 * the sen_gaddr1..4 hash registers.
 */
static void set_multicast_one(struct net_device *dev, const u8 * mac)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_enet_t *ep = fep->scc.ep;
	u16 taddrh, taddrm, taddrl;

	taddrh = ((u16) mac[5] << 8) | mac[4];
	taddrm = ((u16) mac[3] << 8) | mac[2];
	taddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_taddrh, taddrh);
	W16(ep, sen_taddrm, taddrm);
	W16(ep, sen_taddrl, taddrl);
	scc_cr_cmd(fep, CPM_CR_SET_GADDR);
}
225
/*
 * Finish a multicast filter rebuild: always leave promiscuous mode;
 * if the caller wants all multicast (or the list exceeds what the hash
 * can distinguish), open the filter completely by setting every bit.
 */
static void set_multicast_finish(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t *sccp = fep->scc.sccp;
	scc_enet_t *ep = fep->scc.ep;

	/* clear promiscuous always */
	C16(sccp, scc_psmr, SCC_PSMR_PRO);

	/* if all multi or too many multicasts; just enable all */
	if ((dev->flags & IFF_ALLMULTI) != 0 ||
	    dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {

		W16(ep, sen_gaddr1, 0xffff);
		W16(ep, sen_gaddr2, 0xffff);
		W16(ep, sen_gaddr3, 0xffff);
		W16(ep, sen_gaddr4, 0xffff);
	}
}
245
246static void set_multicast_list(struct net_device *dev)
247{
248 struct dev_mc_list *pmc;
249
250 if ((dev->flags & IFF_PROMISC) == 0) {
251 set_multicast_start(dev);
252 for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
253 set_multicast_one(dev, pmc->dmi_addr);
254 set_multicast_finish(dev);
255 } else
256 set_promiscuous_mode(dev);
257}
258
/*
 * Start or restart the SCC Ethernet controller during a link change.
 * This only happens when switching between half and full duplex.
 * Fully reprograms the SCC parameter RAM and mode registers,
 * reinitializes the buffer-descriptor rings, then re-enables RX/TX.
 * (The original comment said "FEC"; this is the SCC variant.)
 */
static void restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t *sccp = fep->scc.sccp;
	scc_enet_t *ep = fep->scc.ep;
	const struct fs_platform_info *fpi = fep->fpi;
	u16 paddrh, paddrm, paddrl;
	const unsigned char *mac;
	int i;

	/* Disable receiver and transmitter while we reprogram. */
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* clear everything (slow & steady does it) */
	for (i = 0; i < sizeof(*ep); i++)
		__fs_out8((char *)ep + i, 0);

	/* point to bds: RX ring first, TX ring immediately after it */
	W16(ep, sen_genscc.scc_rbase, fep->ring_mem_addr);
	W16(ep, sen_genscc.scc_tbase,
	    fep->ring_mem_addr + sizeof(cbd_t) * fpi->rx_ring);

	/* Initialize function code registers for big-endian.
	 */
	W8(ep, sen_genscc.scc_rfcr, SCC_EB);
	W8(ep, sen_genscc.scc_tfcr, SCC_EB);

	/* Set maximum bytes per receive buffer.
	 * This appears to be an Ethernet frame size, not the buffer
	 * fragment size.  It must be a multiple of four.
	 */
	W16(ep, sen_genscc.scc_mrblr, 0x5f0);

	/* Set CRC preset and mask (standard Ethernet CRC-32 values).
	 */
	W32(ep, sen_cpres, 0xffffffff);
	W32(ep, sen_cmask, 0xdebb20e3);

	W32(ep, sen_crcec, 0);	/* CRC Error counter */
	W32(ep, sen_alec, 0);	/* alignment error counter */
	W32(ep, sen_disfc, 0);	/* discard frame counter */

	W16(ep, sen_pads, 0x8888);	/* Tx short frame pad character */
	W16(ep, sen_retlim, 15);	/* Retry limit threshold */

	W16(ep, sen_maxflr, 0x5ee);	/* maximum frame length register */

	W16(ep, sen_minflr, PKT_MINBUF_SIZE);	/* minimum frame length register */

	W16(ep, sen_maxd1, 0x000005f0);	/* maximum DMA1 length */
	W16(ep, sen_maxd2, 0x000005f0);	/* maximum DMA2 length */

	/* Clear hash tables (group and individual address filters).
	 */
	W16(ep, sen_gaddr1, 0);
	W16(ep, sen_gaddr2, 0);
	W16(ep, sen_gaddr3, 0);
	W16(ep, sen_gaddr4, 0);
	W16(ep, sen_iaddr1, 0);
	W16(ep, sen_iaddr2, 0);
	W16(ep, sen_iaddr3, 0);
	W16(ep, sen_iaddr4, 0);

	/* Set station address: packed as three 16-bit halves, with
	 * paddrl holding mac[1]:mac[0] and paddrh holding mac[5]:mac[4]
	 * (byte order per CPM convention -- NOTE(review): confirm
	 * against the CPM reference manual).
	 */
	mac = dev->dev_addr;
	paddrh = ((u16) mac[5] << 8) | mac[4];
	paddrm = ((u16) mac[3] << 8) | mac[2];
	paddrl = ((u16) mac[1] << 8) | mac[0];

	W16(ep, sen_paddrh, paddrh);
	W16(ep, sen_paddrm, paddrm);
	W16(ep, sen_paddrl, paddrl);

	/* No persistence performance and no temp address. */
	W16(ep, sen_pper, 0);
	W16(ep, sen_taddrl, 0);
	W16(ep, sen_taddrm, 0);
	W16(ep, sen_taddrh, 0);

	/* Rebuild the RX/TX buffer-descriptor rings. */
	fs_init_bds(dev);

	/* Tell the CPM to (re)initialize its RX/TX parameters. */
	scc_cr_cmd(fep, CPM_CR_INIT_TRX);

	/* Clear all pending events. */
	W16(sccp, scc_scce, 0xffff);

	/* Enable interrupts we wish to service.
	 */
	W16(sccp, scc_sccm, SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);

	/* Set GSMR_H to enable all normal operating modes.
	 * Set GSMR_L to enable Ethernet to MC68160.
	 */
	W32(sccp, scc_gsmrh, 0);
	W32(sccp, scc_gsmrl,
	    SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 |
	    SCC_GSMRL_MODE_ENET);

	/* Set sync/delimiters.
	 */
	W16(sccp, scc_dsr, 0xd555);

	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
	 * start frame search 22 bit times after RENA.
	 */
	W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);

	/* Set full duplex mode if needed */
	if (fep->duplex)
		S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);

	/* Finally re-enable receiver and transmitter. */
	S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
375
/*
 * Stop the SCC: briefly poll the controller, mask all SCC interrupts,
 * disable the receiver/transmitter, then release the BD rings.
 *
 * NOTE(review): the poll spins while scc_sccm reads as ZERO, which
 * looks inverted for a "graceful transmit stop" wait -- confirm the
 * intended condition against the CPM manual before trusting the
 * timeout warning below.
 */
static void stop(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t *sccp = fep->scc.sccp;
	int i;

	for (i = 0; (R16(sccp, scc_sccm) == 0) && i < SCC_RESET_DELAY; i++)
		udelay(1);

	if (i == SCC_RESET_DELAY)
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s SCC timeout on graceful transmit stop\n",
		       dev->name);

	/* Mask all SCC interrupts and disable RX/TX. */
	W16(sccp, scc_sccm, 0);
	C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	/* Free the socket buffers attached to the rings. */
	fs_cleanup_bds(dev);
}
395
/*
 * Hook run before request_irq().  For external SIU interrupt sources
 * this programs the SIEL register: even-numbered IRQs get their SIEL
 * bit set, odd-numbered ones get the bit of their IRQ/level pair
 * cleared (hence "irq & ~1").  NOTE(review): presumably this selects
 * edge vs. level sensitivity per the MPC8xx SIEL definition -- confirm
 * against the reference manual.
 */
static void pre_request_irq(struct net_device *dev, int irq)
{
	immap_t *immap = fs_enet_immap;
	u32 siel;

	/* SIU interrupt */
	if (irq >= SIU_IRQ0 && irq < SIU_LEVEL7) {

		siel = in_be32(&immap->im_siu_conf.sc_siel);
		if ((irq & 1) == 0)
			siel |= (0x80000000 >> irq);
		else
			siel &= ~(0x80000000 >> (irq & ~1));
		out_be32(&immap->im_siu_conf.sc_siel, siel);
	}
}
412
/* Hook run after free_irq(); the SCC variant has nothing to undo. */
static void post_free_irq(struct net_device *dev, int irq)
{
	/* nothing */
}
417
/*
 * Acknowledge the RX events NAPI polls for by writing the RX event
 * mask back to the SCC event register.
 */
static void napi_clear_rx_event(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, SCC_NAPI_RX_EVENT_MSK);
}
425
/* Unmask the RX events in scc_sccm so RX interrupts fire again. */
static void napi_enable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t *sccp = fep->scc.sccp;

	S16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}
433
/* Mask the RX events in scc_sccm while NAPI polling is in progress. */
static void napi_disable_rx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t *sccp = fep->scc.sccp;

	C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}
441
/* Called after RX buffer descriptors are replenished; no explicit
 * notification is needed for the SCC. */
static void rx_bd_done(struct net_device *dev)
{
	/* nothing */
}
446
/* Called after TX buffer descriptors are queued; no explicit kick is
 * required for the SCC. */
static void tx_kickstart(struct net_device *dev)
{
	/* nothing */
}
451
/* Return the pending SCC event bits (16-bit SCCE, zero-extended). */
static u32 get_int_events(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t *sccp = fep->scc.sccp;

	return (u32) R16(sccp, scc_scce);
}
459
/*
 * Acknowledge the given event bits by writing them back to scc_scce.
 * Only the low 16 bits are meaningful for the SCC.
 */
static void clear_int_events(struct net_device *dev, u32 int_events)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	scc_t *sccp = fep->scc.sccp;

	W16(sccp, scc_scce, int_events & 0xffff);
}
467
/* Log error event bits reported by the generic interrupt handler. */
static void ev_error(struct net_device *dev, u32 int_events)
{
	printk(KERN_WARNING DRV_MODULE_NAME
	       ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
}
473
474static int get_regs(struct net_device *dev, void *p, int *sizep)
475{
476 struct fs_enet_private *fep = netdev_priv(dev);
477
478 if (*sizep < sizeof(scc_t) + sizeof(scc_enet_t))
479 return -EINVAL;
480
481 memcpy_fromio(p, fep->scc.sccp, sizeof(scc_t));
482 p = (char *)p + sizeof(scc_t);
483
484 memcpy_fromio(p, fep->scc.ep, sizeof(scc_enet_t));
485
486 return 0;
487}
488
/* ethtool: size of the register dump produced by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return sizeof(scc_t) + sizeof(scc_enet_t);
}
493
/* Issue the CPM "restart transmit" command for this SCC. */
static void tx_restart(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	scc_cr_cmd(fep, CPM_CR_RESTART_TX);
}
500
501/*************************************************************************/
502
/* SCC-specific implementation of the fs_enet MAC operations vector. */
const struct fs_ops fs_scc_ops = {
	.setup_data		= setup_data,
	.cleanup_data		= cleanup_data,
	.set_multicast_list	= set_multicast_list,
	.restart		= restart,
	.stop			= stop,
	.pre_request_irq	= pre_request_irq,
	.post_free_irq		= post_free_irq,
	.napi_clear_rx_event	= napi_clear_rx_event,
	.napi_enable_rx		= napi_enable_rx,
	.napi_disable_rx	= napi_disable_rx,
	.rx_bd_done		= rx_bd_done,
	.tx_kickstart		= tx_kickstart,
	.get_int_events		= get_int_events,
	.clear_int_events	= clear_int_events,
	.ev_error		= ev_error,
	.get_regs		= get_regs,
	.get_regs_len		= get_regs_len,
	.tx_restart		= tx_restart,
	.allocate_bd		= allocate_bd,
	.free_bd		= free_bd,
};
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
new file mode 100644
index 000000000000..24a5e2e23d18
--- /dev/null
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -0,0 +1,405 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37
38#include <asm/pgtable.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42#include "fs_enet.h"
43
#ifdef CONFIG_8xx
/*
 * Resolve an MPC8xx parallel-I/O (port, bit) pair into pointers to the
 * port's direction and data registers plus the single-bit mask for
 * that pin.  Rejects pins that are assigned to a dedicated peripheral
 * function (their PAR bit is set) instead of general-purpose I/O.
 *
 * @dirp, @datp: out -- direction/data register addresses for the pin
 * @mskp:        out -- bit mask within those registers
 * Returns 0 on success, -EINVAL for a bad port or a non-GPIO pin.
 */
static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
{
	immap_t *im = (immap_t *)fs_enet_immap;
	void *dir, *dat, *ppar;
	int adv;
	u8 msk;

	switch (port) {
	case fsiop_porta:
		dir = &im->im_ioport.iop_padir;
		dat = &im->im_ioport.iop_padat;
		ppar = &im->im_ioport.iop_papar;
		break;

	case fsiop_portb:
		dir = &im->im_cpm.cp_pbdir;
		dat = &im->im_cpm.cp_pbdat;
		ppar = &im->im_cpm.cp_pbpar;
		break;

	case fsiop_portc:
		dir = &im->im_ioport.iop_pcdir;
		dat = &im->im_ioport.iop_pcdat;
		ppar = &im->im_ioport.iop_pcpar;
		break;

	case fsiop_portd:
		dir = &im->im_ioport.iop_pddir;
		dat = &im->im_ioport.iop_pddat;
		ppar = &im->im_ioport.iop_pdpar;
		break;

	case fsiop_porte:
		dir = &im->im_cpm.cp_pedir;
		dat = &im->im_cpm.cp_pedat;
		ppar = &im->im_cpm.cp_pepar;
		break;

	default:
		/* fix: add the ": " separator after the module name,
		 * matching the driver's other printk messages */
		printk(KERN_ERR DRV_MODULE_NAME
		       ": Illegal port value %d!\n", port);
		return -EINVAL;
	}

	/* The registers are wider than 8 bits; advance to the byte
	 * that holds this pin. */
	adv = bit >> 3;
	dir = (char *)dir + adv;
	dat = (char *)dat + adv;
	ppar = (char *)ppar + adv;

	msk = 1 << (7 - (bit & 7));
	if ((in_8(ppar) & msk) != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": pin %d on port %d is not general purpose!\n",
		       bit, port);
		return -EINVAL;
	}

	*dirp = dir;
	*datp = dat;
	*mskp = msk;

	return 0;
}
#endif
108
#ifdef CONFIG_8260
/*
 * Resolve an MPC8260 parallel-I/O (port, bit) pair into pointers to
 * the port's direction and data registers plus the single-bit mask for
 * that pin.  Rejects pins that are assigned to a dedicated peripheral
 * function (their PPAR bit is set) instead of general-purpose I/O.
 *
 * @dirp, @datp: out -- direction/data register addresses for the pin
 * @mskp:        out -- bit mask within those registers
 * Returns 0 on success, -EINVAL for a bad port or a non-GPIO pin.
 */
static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
{
	iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
	void *dir, *dat, *ppar;
	int adv;
	u8 msk;

	switch (port) {
	case fsiop_porta:
		dir = &io->iop_pdira;
		dat = &io->iop_pdata;
		ppar = &io->iop_ppara;
		break;

	case fsiop_portb:
		dir = &io->iop_pdirb;
		dat = &io->iop_pdatb;
		ppar = &io->iop_pparb;
		break;

	case fsiop_portc:
		dir = &io->iop_pdirc;
		dat = &io->iop_pdatc;
		ppar = &io->iop_pparc;
		break;

	case fsiop_portd:
		dir = &io->iop_pdird;
		dat = &io->iop_pdatd;
		ppar = &io->iop_ppard;
		break;

	default:
		/* fix: add the ": " separator after the module name,
		 * matching the driver's other printk messages */
		printk(KERN_ERR DRV_MODULE_NAME
		       ": Illegal port value %d!\n", port);
		return -EINVAL;
	}

	/* The registers are wider than 8 bits; advance to the byte
	 * that holds this pin. */
	adv = bit >> 3;
	dir = (char *)dir + adv;
	dat = (char *)dat + adv;
	ppar = (char *)ppar + adv;

	msk = 1 << (7 - (bit & 7));
	if ((in_8(ppar) & msk) != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": pin %d on port %d is not general purpose!\n",
		       bit, port);
		return -EINVAL;
	}

	*dirp = dir;
	*datp = dat;
	*mskp = msk;

	return 0;
}
#endif
167
/* Set the bits of mask m in the 8-bit register at p (read-modify-write). */
static inline void bb_set(u8 *p, u8 m)
{
	out_8(p, in_8(p) | m);
}
172
/* Clear the bits of mask m in the 8-bit register at p (read-modify-write). */
static inline void bb_clr(u8 *p, u8 m)
{
	out_8(p, in_8(p) & ~m);
}
177
/* Return 1 if any bit of mask m is set in the register at p, else 0. */
static inline int bb_read(u8 *p, u8 m)
{
	return (in_8(p) & m) != 0;
}
182
/* Make the MDIO pin an output so we can drive data onto the wire. */
static inline void mdio_active(struct fs_enet_mii_bus *bus)
{
	bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
}
187
/* Make the MDIO pin an input (tri-state) so the PHY can drive it. */
static inline void mdio_tristate(struct fs_enet_mii_bus *bus)
{
	bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk);
}
192
/* Sample the MDIO data line; returns 0 or 1. */
static inline int mdio_read(struct fs_enet_mii_bus *bus)
{
	return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
}
197
198static inline void mdio(struct fs_enet_mii_bus *bus, int what)
199{
200 if (what)
201 bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
202 else
203 bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk);
204}
205
206static inline void mdc(struct fs_enet_mii_bus *bus, int what)
207{
208 if (what)
209 bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
210 else
211 bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk);
212}
213
/* Busy-wait one half MDC period, as configured in the bus info. */
static inline void mii_delay(struct fs_enet_mii_bus *bus)
{
	udelay(bus->bus_info->i.bitbang.delay);
}
218
/*
 * Utility to send the preamble, start/opcode, PHY address, and register
 * number (the part of an MDIO frame common to reads and writes).
 *
 * @read: nonzero for a read frame (opcode 10), zero for write (01).
 */
static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
{
	int j;

	/*
	 * Send a 32 bit preamble ('1's) with an extra '1' bit for good measure.
	 * The IEEE spec says this is a PHY optional requirement.  The AMD
	 * 79C874 requires one after power up and one after a MII communications
	 * error.  This means that we are doing more preambles than we need,
	 * but it is safer and will be much more robust.
	 */

	mdio_active(bus);
	mdio(bus, 1);
	for (j = 0; j < 32; j++) {
		mdc(bus, 0);
		mii_delay(bus);
		mdc(bus, 1);
		mii_delay(bus);
	}

	/* send the start bits (01) and the read opcode (10) or write (01);
	 * each bit is clocked out on the rising edge of MDC */
	mdc(bus, 0);
	mdio(bus, 0);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);
	mdc(bus, 0);
	mdio(bus, 1);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);
	mdc(bus, 0);
	mdio(bus, read);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);
	mdc(bus, 0);
	mdio(bus, !read);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);

	/* send the PHY address, MSB first (5 bits, 0x10 masks the top bit) */
	for (j = 0; j < 5; j++) {
		mdc(bus, 0);
		mdio(bus, (addr & 0x10) != 0);
		mii_delay(bus);
		mdc(bus, 1);
		mii_delay(bus);
		addr <<= 1;
	}

	/* send the register address, MSB first */
	for (j = 0; j < 5; j++) {
		mdc(bus, 0);
		mdio(bus, (reg & 0x10) != 0);
		mii_delay(bus);
		mdc(bus, 1);
		mii_delay(bus);
		reg <<= 1;
	}
}
283
/*
 * Read one MII register over the bit-banged MDIO bus.
 * Returns the 16-bit register value, or -1 if the PHY did not drive
 * the turnaround bit low (no PHY responded).
 */
static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
{
	u16 rdreg;
	int ret, j;
	u8 addr = phy_id & 0xff;
	u8 reg = location & 0xff;

	/* preamble + start + read opcode + PHY/register address */
	bitbang_pre(bus, 1, addr, reg);

	/* tri-state our MDIO I/O pin so we can read */
	mdc(bus, 0);
	mdio_tristate(bus);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);

	/* check the turnaround bit: the PHY should be driving it to zero */
	if (mdio_read(bus) != 0) {
		/* PHY didn't drive TA low; clock out 32 dummy cycles to
		 * flush the frame before giving up */
		for (j = 0; j < 32; j++) {
			mdc(bus, 0);
			mii_delay(bus);
			mdc(bus, 1);
			mii_delay(bus);
		}
		ret = -1;
		goto out;
	}

	mdc(bus, 0);
	mii_delay(bus);

	/* read 16 bits of register data, MSB first */
	rdreg = 0;
	for (j = 0; j < 16; j++) {
		mdc(bus, 1);
		mii_delay(bus);
		rdreg <<= 1;
		rdreg |= mdio_read(bus);
		mdc(bus, 0);
		mii_delay(bus);
	}

	/* a few trailing clocks to let the PHY release the bus */
	mdc(bus, 1);
	mii_delay(bus);
	mdc(bus, 0);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);

	ret = rdreg;
out:
	return ret;
}
338
/*
 * Write one MII register over the bit-banged MDIO bus.
 * Only the low 16 bits of @val are transmitted.
 */
static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
{
	int j;
	u8 addr = phy_id & 0xff;
	u8 reg = location & 0xff;
	u16 value = val & 0xffff;

	/* preamble + start + write opcode + PHY/register address */
	bitbang_pre(bus, 0, addr, reg);

	/* send the turnaround (10) */
	mdc(bus, 0);
	mdio(bus, 1);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);
	mdc(bus, 0);
	mdio(bus, 0);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);

	/* write 16 bits of register data, MSB first */
	for (j = 0; j < 16; j++) {
		mdc(bus, 0);
		mdio(bus, (value & 0x8000) != 0);
		mii_delay(bus);
		mdc(bus, 1);
		mii_delay(bus);
		value <<= 1;
	}

	/*
	 * Tri-state the MDIO line and clock once more to finish the frame.
	 */
	mdio_tristate(bus);
	mdc(bus, 0);
	mii_delay(bus);
	mdc(bus, 1);
	mii_delay(bus);
}
379
380int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus)
381{
382 const struct fs_mii_bus_info *bi = bus->bus_info;
383 int r;
384
385 r = bitbang_prep_bit(&bus->bitbang.mdio_dir,
386 &bus->bitbang.mdio_dat,
387 &bus->bitbang.mdio_msk,
388 bi->i.bitbang.mdio_port,
389 bi->i.bitbang.mdio_bit);
390 if (r != 0)
391 return r;
392
393 r = bitbang_prep_bit(&bus->bitbang.mdc_dir,
394 &bus->bitbang.mdc_dat,
395 &bus->bitbang.mdc_msk,
396 bi->i.bitbang.mdc_port,
397 bi->i.bitbang.mdc_bit);
398 if (r != 0)
399 return r;
400
401 bus->mii_read = mii_read;
402 bus->mii_write = mii_write;
403
404 return 0;
405}
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
new file mode 100644
index 000000000000..b3e192d612e5
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fixed.c
@@ -0,0 +1,92 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37
38#include <asm/pgtable.h>
39#include <asm/irq.h>
40#include <asm/uaccess.h>
41
42#include "fs_enet.h"
43
/*
 * Canned MII register images served for the fixed (PHY-less) link,
 * indexed by MII register number; the values emulate a link-up
 * 100Mb/full PHY.  Register 5 (LPA) is actually served from
 * bus->fixed.lpa by mii_read() below, not from this table.
 * NOTE(review): presumably 0=BMCR, 1=BMSR, 4=ANAR per IEEE 802.3
 * clause 22 -- confirm against <linux/mii.h>.
 */
static const u16 mii_regs[7] = {
	0x3100,
	0x786d,
	0x0fff,
	0x0fff,
	0x01e1,
	0x45e1,
	0x0003,
};
53
54static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
55{
56 int ret = 0;
57
58 if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
59 return -1;
60
61 if (location != 5)
62 ret = mii_regs[location];
63 else
64 ret = bus->fixed.lpa;
65
66 return ret;
67}
68
/* Writes are ignored: there is no real PHY behind the fixed bus. */
static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
{
	/* do nothing */
}
73
74int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
75{
76 const struct fs_mii_bus_info *bi = bus->bus_info;
77
78 bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
79
80 /* if speed is fixed at 10Mb, remove 100Mb modes */
81 if (bi->i.fixed.speed == 10)
82 bus->fixed.lpa &= ~LPA_100;
83
84 /* if duplex is half, remove full duplex modes */
85 if (bi->i.fixed.duplex == 0)
86 bus->fixed.lpa &= ~LPA_DUPLEX;
87
88 bus->mii_read = mii_read;
89 bus->mii_write = mii_write;
90
91 return 0;
92}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 6518334b9280..ae5a2ed3b264 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -29,12 +29,7 @@
29 * define the configuration needed by the board are defined in a 29 * define the configuration needed by the board are defined in a
30 * board structure in arch/ppc/platforms (though I do not 30 * board structure in arch/ppc/platforms (though I do not
31 * discount the possibility that other architectures could one 31 * discount the possibility that other architectures could one
32 * day be supported. One assumption the driver currently makes 32 * day be supported.
33 * is that the PHY is configured in such a way to advertise all
34 * capabilities. This is a sensible default, and on certain
35 * PHYs, changing this default encounters substantial errata
36 * issues. Future versions may remove this requirement, but for
37 * now, it is best for the firmware to ensure this is the case.
38 * 33 *
39 * The Gianfar Ethernet Controller uses a ring of buffer 34 * The Gianfar Ethernet Controller uses a ring of buffer
40 * descriptors. The beginning is indicated by a register 35 * descriptors. The beginning is indicated by a register
@@ -47,7 +42,7 @@
47 * corresponding bit in the IMASK register is also set (if 42 * corresponding bit in the IMASK register is also set (if
48 * interrupt coalescing is active, then the interrupt may not 43 * interrupt coalescing is active, then the interrupt may not
49 * happen immediately, but will wait until either a set number 44 * happen immediately, but will wait until either a set number
50 * of frames or amount of time have passed.). In NAPI, the 45 * of frames or amount of time have passed). In NAPI, the
51 * interrupt handler will signal there is work to be done, and 46 * interrupt handler will signal there is work to be done, and
52 * exit. Without NAPI, the packet(s) will be handled 47 * exit. Without NAPI, the packet(s) will be handled
53 * immediately. Both methods will start at the last known empty 48 * immediately. Both methods will start at the last known empty
@@ -75,6 +70,7 @@
75#include <linux/sched.h> 70#include <linux/sched.h>
76#include <linux/string.h> 71#include <linux/string.h>
77#include <linux/errno.h> 72#include <linux/errno.h>
73#include <linux/unistd.h>
78#include <linux/slab.h> 74#include <linux/slab.h>
79#include <linux/interrupt.h> 75#include <linux/interrupt.h>
80#include <linux/init.h> 76#include <linux/init.h>
@@ -97,9 +93,11 @@
97#include <linux/version.h> 93#include <linux/version.h>
98#include <linux/dma-mapping.h> 94#include <linux/dma-mapping.h>
99#include <linux/crc32.h> 95#include <linux/crc32.h>
96#include <linux/mii.h>
97#include <linux/phy.h>
100 98
101#include "gianfar.h" 99#include "gianfar.h"
102#include "gianfar_phy.h" 100#include "gianfar_mii.h"
103 101
104#define TX_TIMEOUT (1*HZ) 102#define TX_TIMEOUT (1*HZ)
105#define SKB_ALLOC_TIMEOUT 1000000 103#define SKB_ALLOC_TIMEOUT 1000000
@@ -113,9 +111,8 @@
113#endif 111#endif
114 112
115const char gfar_driver_name[] = "Gianfar Ethernet"; 113const char gfar_driver_name[] = "Gianfar Ethernet";
116const char gfar_driver_version[] = "1.1"; 114const char gfar_driver_version[] = "1.2";
117 115
118int startup_gfar(struct net_device *dev);
119static int gfar_enet_open(struct net_device *dev); 116static int gfar_enet_open(struct net_device *dev);
120static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 117static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
121static void gfar_timeout(struct net_device *dev); 118static void gfar_timeout(struct net_device *dev);
@@ -126,17 +123,13 @@ static int gfar_set_mac_address(struct net_device *dev);
126static int gfar_change_mtu(struct net_device *dev, int new_mtu); 123static int gfar_change_mtu(struct net_device *dev, int new_mtu);
127static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs); 124static irqreturn_t gfar_error(int irq, void *dev_id, struct pt_regs *regs);
128static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs); 125static irqreturn_t gfar_transmit(int irq, void *dev_id, struct pt_regs *regs);
129static irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
130static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs); 126static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs);
131static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
132static void gfar_phy_change(void *data);
133static void gfar_phy_timer(unsigned long data);
134static void adjust_link(struct net_device *dev); 127static void adjust_link(struct net_device *dev);
135static void init_registers(struct net_device *dev); 128static void init_registers(struct net_device *dev);
136static int init_phy(struct net_device *dev); 129static int init_phy(struct net_device *dev);
137static int gfar_probe(struct device *device); 130static int gfar_probe(struct device *device);
138static int gfar_remove(struct device *device); 131static int gfar_remove(struct device *device);
139void free_skb_resources(struct gfar_private *priv); 132static void free_skb_resources(struct gfar_private *priv);
140static void gfar_set_multi(struct net_device *dev); 133static void gfar_set_multi(struct net_device *dev);
141static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 134static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
142#ifdef CONFIG_GFAR_NAPI 135#ifdef CONFIG_GFAR_NAPI
@@ -144,7 +137,6 @@ static int gfar_poll(struct net_device *dev, int *budget);
144#endif 137#endif
145int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 138int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
146static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length); 139static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
147static void gfar_phy_startup_timer(unsigned long data);
148static void gfar_vlan_rx_register(struct net_device *netdev, 140static void gfar_vlan_rx_register(struct net_device *netdev,
149 struct vlan_group *grp); 141 struct vlan_group *grp);
150static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 142static void gfar_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
@@ -162,6 +154,9 @@ int gfar_uses_fcb(struct gfar_private *priv)
162 else 154 else
163 return 0; 155 return 0;
164} 156}
157
158/* Set up the ethernet device structure, private data,
159 * and anything else we need before we start */
165static int gfar_probe(struct device *device) 160static int gfar_probe(struct device *device)
166{ 161{
167 u32 tempval; 162 u32 tempval;
@@ -175,7 +170,7 @@ static int gfar_probe(struct device *device)
175 170
176 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data; 171 einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
177 172
178 if (einfo == NULL) { 173 if (NULL == einfo) {
179 printk(KERN_ERR "gfar %d: Missing additional data!\n", 174 printk(KERN_ERR "gfar %d: Missing additional data!\n",
180 pdev->id); 175 pdev->id);
181 176
@@ -185,7 +180,7 @@ static int gfar_probe(struct device *device)
185 /* Create an ethernet device instance */ 180 /* Create an ethernet device instance */
186 dev = alloc_etherdev(sizeof (*priv)); 181 dev = alloc_etherdev(sizeof (*priv));
187 182
188 if (dev == NULL) 183 if (NULL == dev)
189 return -ENOMEM; 184 return -ENOMEM;
190 185
191 priv = netdev_priv(dev); 186 priv = netdev_priv(dev);
@@ -207,20 +202,11 @@ static int gfar_probe(struct device *device)
207 priv->regs = (struct gfar *) 202 priv->regs = (struct gfar *)
208 ioremap(r->start, sizeof (struct gfar)); 203 ioremap(r->start, sizeof (struct gfar));
209 204
210 if (priv->regs == NULL) { 205 if (NULL == priv->regs) {
211 err = -ENOMEM; 206 err = -ENOMEM;
212 goto regs_fail; 207 goto regs_fail;
213 } 208 }
214 209
215 /* Set the PHY base address */
216 priv->phyregs = (struct gfar *)
217 ioremap(einfo->phy_reg_addr, sizeof (struct gfar));
218
219 if (priv->phyregs == NULL) {
220 err = -ENOMEM;
221 goto phy_regs_fail;
222 }
223
224 spin_lock_init(&priv->lock); 210 spin_lock_init(&priv->lock);
225 211
226 dev_set_drvdata(device, dev); 212 dev_set_drvdata(device, dev);
@@ -386,12 +372,10 @@ static int gfar_probe(struct device *device)
386 return 0; 372 return 0;
387 373
388register_fail: 374register_fail:
389 iounmap((void *) priv->phyregs);
390phy_regs_fail:
391 iounmap((void *) priv->regs); 375 iounmap((void *) priv->regs);
392regs_fail: 376regs_fail:
393 free_netdev(dev); 377 free_netdev(dev);
394 return -ENOMEM; 378 return err;
395} 379}
396 380
397static int gfar_remove(struct device *device) 381static int gfar_remove(struct device *device)
@@ -402,108 +386,41 @@ static int gfar_remove(struct device *device)
402 dev_set_drvdata(device, NULL); 386 dev_set_drvdata(device, NULL);
403 387
404 iounmap((void *) priv->regs); 388 iounmap((void *) priv->regs);
405 iounmap((void *) priv->phyregs);
406 free_netdev(dev); 389 free_netdev(dev);
407 390
408 return 0; 391 return 0;
409} 392}
410 393
411 394
412/* Configure the PHY for dev. 395/* Initializes driver's PHY state, and attaches to the PHY.
413 * returns 0 if success. -1 if failure 396 * Returns 0 on success.
414 */ 397 */
415static int init_phy(struct net_device *dev) 398static int init_phy(struct net_device *dev)
416{ 399{
417 struct gfar_private *priv = netdev_priv(dev); 400 struct gfar_private *priv = netdev_priv(dev);
418 struct phy_info *curphy; 401 uint gigabit_support =
419 unsigned int timeout = PHY_INIT_TIMEOUT; 402 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
420 struct gfar *phyregs = priv->phyregs; 403 SUPPORTED_1000baseT_Full : 0;
421 struct gfar_mii_info *mii_info; 404 struct phy_device *phydev;
422 int err;
423 405
424 priv->oldlink = 0; 406 priv->oldlink = 0;
425 priv->oldspeed = 0; 407 priv->oldspeed = 0;
426 priv->oldduplex = -1; 408 priv->oldduplex = -1;
427 409
428 mii_info = kmalloc(sizeof(struct gfar_mii_info), 410 phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0);
429 GFP_KERNEL);
430
431 if(NULL == mii_info) {
432 if (netif_msg_ifup(priv))
433 printk(KERN_ERR "%s: Could not allocate mii_info\n",
434 dev->name);
435 return -ENOMEM;
436 }
437
438 mii_info->speed = SPEED_1000;
439 mii_info->duplex = DUPLEX_FULL;
440 mii_info->pause = 0;
441 mii_info->link = 1;
442
443 mii_info->advertising = (ADVERTISED_10baseT_Half |
444 ADVERTISED_10baseT_Full |
445 ADVERTISED_100baseT_Half |
446 ADVERTISED_100baseT_Full |
447 ADVERTISED_1000baseT_Full);
448 mii_info->autoneg = 1;
449 411
450 spin_lock_init(&mii_info->mdio_lock); 412 if (IS_ERR(phydev)) {
451 413 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
452 mii_info->mii_id = priv->einfo->phyid; 414 return PTR_ERR(phydev);
453
454 mii_info->dev = dev;
455
456 mii_info->mdio_read = &read_phy_reg;
457 mii_info->mdio_write = &write_phy_reg;
458
459 priv->mii_info = mii_info;
460
461 /* Reset the management interface */
462 gfar_write(&phyregs->miimcfg, MIIMCFG_RESET);
463
464 /* Setup the MII Mgmt clock speed */
465 gfar_write(&phyregs->miimcfg, MIIMCFG_INIT_VALUE);
466
467 /* Wait until the bus is free */
468 while ((gfar_read(&phyregs->miimind) & MIIMIND_BUSY) &&
469 timeout--)
470 cpu_relax();
471
472 if(timeout <= 0) {
473 printk(KERN_ERR "%s: The MII Bus is stuck!\n",
474 dev->name);
475 err = -1;
476 goto bus_fail;
477 }
478
479 /* get info for this PHY */
480 curphy = get_phy_info(priv->mii_info);
481
482 if (curphy == NULL) {
483 if (netif_msg_ifup(priv))
484 printk(KERN_ERR "%s: No PHY found\n", dev->name);
485 err = -1;
486 goto no_phy;
487 } 415 }
488 416
489 mii_info->phyinfo = curphy; 417 /* Remove any features not supported by the controller */
418 phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
419 phydev->advertising = phydev->supported;
490 420
491 /* Run the commands which initialize the PHY */ 421 priv->phydev = phydev;
492 if(curphy->init) {
493 err = curphy->init(priv->mii_info);
494
495 if (err)
496 goto phy_init_fail;
497 }
498 422
499 return 0; 423 return 0;
500
501phy_init_fail:
502no_phy:
503bus_fail:
504 kfree(mii_info);
505
506 return err;
507} 424}
508 425
509static void init_registers(struct net_device *dev) 426static void init_registers(struct net_device *dev)
@@ -603,24 +520,13 @@ void stop_gfar(struct net_device *dev)
603 struct gfar *regs = priv->regs; 520 struct gfar *regs = priv->regs;
604 unsigned long flags; 521 unsigned long flags;
605 522
523 phy_stop(priv->phydev);
524
606 /* Lock it down */ 525 /* Lock it down */
607 spin_lock_irqsave(&priv->lock, flags); 526 spin_lock_irqsave(&priv->lock, flags);
608 527
609 /* Tell the kernel the link is down */
610 priv->mii_info->link = 0;
611 adjust_link(dev);
612
613 gfar_halt(dev); 528 gfar_halt(dev);
614 529
615 if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
616 /* Clear any pending interrupts */
617 mii_clear_phy_interrupt(priv->mii_info);
618
619 /* Disable PHY Interrupts */
620 mii_configure_phy_interrupt(priv->mii_info,
621 MII_INTERRUPT_DISABLED);
622 }
623
624 spin_unlock_irqrestore(&priv->lock, flags); 530 spin_unlock_irqrestore(&priv->lock, flags);
625 531
626 /* Free the IRQs */ 532 /* Free the IRQs */
@@ -629,13 +535,7 @@ void stop_gfar(struct net_device *dev)
629 free_irq(priv->interruptTransmit, dev); 535 free_irq(priv->interruptTransmit, dev);
630 free_irq(priv->interruptReceive, dev); 536 free_irq(priv->interruptReceive, dev);
631 } else { 537 } else {
632 free_irq(priv->interruptTransmit, dev); 538 free_irq(priv->interruptTransmit, dev);
633 }
634
635 if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
636 free_irq(priv->einfo->interruptPHY, dev);
637 } else {
638 del_timer_sync(&priv->phy_info_timer);
639 } 539 }
640 540
641 free_skb_resources(priv); 541 free_skb_resources(priv);
@@ -649,7 +549,7 @@ void stop_gfar(struct net_device *dev)
649 549
650/* If there are any tx skbs or rx skbs still around, free them. 550/* If there are any tx skbs or rx skbs still around, free them.
651 * Then free tx_skbuff and rx_skbuff */ 551 * Then free tx_skbuff and rx_skbuff */
652void free_skb_resources(struct gfar_private *priv) 552static void free_skb_resources(struct gfar_private *priv)
653{ 553{
654 struct rxbd8 *rxbdp; 554 struct rxbd8 *rxbdp;
655 struct txbd8 *txbdp; 555 struct txbd8 *txbdp;
@@ -770,7 +670,7 @@ int startup_gfar(struct net_device *dev)
770 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * 670 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
771 priv->tx_ring_size, GFP_KERNEL); 671 priv->tx_ring_size, GFP_KERNEL);
772 672
773 if (priv->tx_skbuff == NULL) { 673 if (NULL == priv->tx_skbuff) {
774 if (netif_msg_ifup(priv)) 674 if (netif_msg_ifup(priv))
775 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n", 675 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
776 dev->name); 676 dev->name);
@@ -785,7 +685,7 @@ int startup_gfar(struct net_device *dev)
785 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) * 685 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
786 priv->rx_ring_size, GFP_KERNEL); 686 priv->rx_ring_size, GFP_KERNEL);
787 687
788 if (priv->rx_skbuff == NULL) { 688 if (NULL == priv->rx_skbuff) {
789 if (netif_msg_ifup(priv)) 689 if (netif_msg_ifup(priv))
790 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n", 690 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
791 dev->name); 691 dev->name);
@@ -879,13 +779,7 @@ int startup_gfar(struct net_device *dev)
879 } 779 }
880 } 780 }
881 781
882 /* Set up the PHY change work queue */ 782 phy_start(priv->phydev);
883 INIT_WORK(&priv->tq, gfar_phy_change, dev);
884
885 init_timer(&priv->phy_info_timer);
886 priv->phy_info_timer.function = &gfar_phy_startup_timer;
887 priv->phy_info_timer.data = (unsigned long) priv->mii_info;
888 mod_timer(&priv->phy_info_timer, jiffies + HZ);
889 783
890 /* Configure the coalescing support */ 784 /* Configure the coalescing support */
891 if (priv->txcoalescing) 785 if (priv->txcoalescing)
@@ -933,11 +827,6 @@ tx_skb_fail:
933 priv->tx_bd_base, 827 priv->tx_bd_base,
934 gfar_read(&regs->tbase0)); 828 gfar_read(&regs->tbase0));
935 829
936 if (priv->mii_info->phyinfo->close)
937 priv->mii_info->phyinfo->close(priv->mii_info);
938
939 kfree(priv->mii_info);
940
941 return err; 830 return err;
942} 831}
943 832
@@ -1035,7 +924,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1035 txbdp->status &= TXBD_WRAP; 924 txbdp->status &= TXBD_WRAP;
1036 925
1037 /* Set up checksumming */ 926 /* Set up checksumming */
1038 if ((dev->features & NETIF_F_IP_CSUM) 927 if ((dev->features & NETIF_F_IP_CSUM)
1039 && (CHECKSUM_HW == skb->ip_summed)) { 928 && (CHECKSUM_HW == skb->ip_summed)) {
1040 fcb = gfar_add_fcb(skb, txbdp); 929 fcb = gfar_add_fcb(skb, txbdp);
1041 gfar_tx_checksum(skb, fcb); 930 gfar_tx_checksum(skb, fcb);
@@ -1103,11 +992,9 @@ static int gfar_close(struct net_device *dev)
1103 struct gfar_private *priv = netdev_priv(dev); 992 struct gfar_private *priv = netdev_priv(dev);
1104 stop_gfar(dev); 993 stop_gfar(dev);
1105 994
1106 /* Shutdown the PHY */ 995 /* Disconnect from the PHY */
1107 if (priv->mii_info->phyinfo->close) 996 phy_disconnect(priv->phydev);
1108 priv->mii_info->phyinfo->close(priv->mii_info); 997 priv->phydev = NULL;
1109
1110 kfree(priv->mii_info);
1111 998
1112 netif_stop_queue(dev); 999 netif_stop_queue(dev);
1113 1000
@@ -1343,7 +1230,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
1343 while ((!skb) && timeout--) 1230 while ((!skb) && timeout--)
1344 skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT); 1231 skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);
1345 1232
1346 if (skb == NULL) 1233 if (NULL == skb)
1347 return NULL; 1234 return NULL;
1348 1235
1349 /* We need the data buffer to be aligned properly. We will reserve 1236 /* We need the data buffer to be aligned properly. We will reserve
@@ -1490,7 +1377,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1490 struct gfar_private *priv = netdev_priv(dev); 1377 struct gfar_private *priv = netdev_priv(dev);
1491 struct rxfcb *fcb = NULL; 1378 struct rxfcb *fcb = NULL;
1492 1379
1493 if (skb == NULL) { 1380 if (NULL == skb) {
1494 if (netif_msg_rx_err(priv)) 1381 if (netif_msg_rx_err(priv))
1495 printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name); 1382 printk(KERN_WARNING "%s: Missing skb!!.\n", dev->name);
1496 priv->stats.rx_dropped++; 1383 priv->stats.rx_dropped++;
@@ -1718,131 +1605,9 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1718 return IRQ_HANDLED; 1605 return IRQ_HANDLED;
1719} 1606}
1720 1607
1721static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1722{
1723 struct net_device *dev = (struct net_device *) dev_id;
1724 struct gfar_private *priv = netdev_priv(dev);
1725
1726 /* Clear the interrupt */
1727 mii_clear_phy_interrupt(priv->mii_info);
1728
1729 /* Disable PHY interrupts */
1730 mii_configure_phy_interrupt(priv->mii_info,
1731 MII_INTERRUPT_DISABLED);
1732
1733 /* Schedule the phy change */
1734 schedule_work(&priv->tq);
1735
1736 return IRQ_HANDLED;
1737}
1738
1739/* Scheduled by the phy_interrupt/timer to handle PHY changes */
1740static void gfar_phy_change(void *data)
1741{
1742 struct net_device *dev = (struct net_device *) data;
1743 struct gfar_private *priv = netdev_priv(dev);
1744 int result = 0;
1745
1746 /* Delay to give the PHY a chance to change the
1747 * register state */
1748 msleep(1);
1749
1750 /* Update the link, speed, duplex */
1751 result = priv->mii_info->phyinfo->read_status(priv->mii_info);
1752
1753 /* Adjust the known status as long as the link
1754 * isn't still coming up */
1755 if((0 == result) || (priv->mii_info->link == 0))
1756 adjust_link(dev);
1757
1758 /* Reenable interrupts, if needed */
1759 if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR)
1760 mii_configure_phy_interrupt(priv->mii_info,
1761 MII_INTERRUPT_ENABLED);
1762}
1763
1764/* Called every so often on systems that don't interrupt
1765 * the core for PHY changes */
1766static void gfar_phy_timer(unsigned long data)
1767{
1768 struct net_device *dev = (struct net_device *) data;
1769 struct gfar_private *priv = netdev_priv(dev);
1770
1771 schedule_work(&priv->tq);
1772
1773 mod_timer(&priv->phy_info_timer, jiffies +
1774 GFAR_PHY_CHANGE_TIME * HZ);
1775}
1776
1777/* Keep trying aneg for some time
1778 * If, after GFAR_AN_TIMEOUT seconds, it has not
1779 * finished, we switch to forced.
1780 * Either way, once the process has completed, we either
1781 * request the interrupt, or switch the timer over to
1782 * using gfar_phy_timer to check status */
1783static void gfar_phy_startup_timer(unsigned long data)
1784{
1785 int result;
1786 static int secondary = GFAR_AN_TIMEOUT;
1787 struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
1788 struct gfar_private *priv = netdev_priv(mii_info->dev);
1789
1790 /* Configure the Auto-negotiation */
1791 result = mii_info->phyinfo->config_aneg(mii_info);
1792
1793 /* If autonegotiation failed to start, and
1794 * we haven't timed out, reset the timer, and return */
1795 if (result && secondary--) {
1796 mod_timer(&priv->phy_info_timer, jiffies + HZ);
1797 return;
1798 } else if (result) {
1799 /* Couldn't start autonegotiation.
1800 * Try switching to forced */
1801 mii_info->autoneg = 0;
1802 result = mii_info->phyinfo->config_aneg(mii_info);
1803
1804 /* Forcing failed! Give up */
1805 if(result) {
1806 if (netif_msg_link(priv))
1807 printk(KERN_ERR "%s: Forcing failed!\n",
1808 mii_info->dev->name);
1809 return;
1810 }
1811 }
1812
1813 /* Kill the timer so it can be restarted */
1814 del_timer_sync(&priv->phy_info_timer);
1815
1816 /* Grab the PHY interrupt, if necessary/possible */
1817 if (priv->einfo->board_flags & FSL_GIANFAR_BRD_HAS_PHY_INTR) {
1818 if (request_irq(priv->einfo->interruptPHY,
1819 phy_interrupt,
1820 SA_SHIRQ,
1821 "phy_interrupt",
1822 mii_info->dev) < 0) {
1823 if (netif_msg_intr(priv))
1824 printk(KERN_ERR "%s: Can't get IRQ %d (PHY)\n",
1825 mii_info->dev->name,
1826 priv->einfo->interruptPHY);
1827 } else {
1828 mii_configure_phy_interrupt(priv->mii_info,
1829 MII_INTERRUPT_ENABLED);
1830 return;
1831 }
1832 }
1833
1834 /* Start the timer again, this time in order to
1835 * handle a change in status */
1836 init_timer(&priv->phy_info_timer);
1837 priv->phy_info_timer.function = &gfar_phy_timer;
1838 priv->phy_info_timer.data = (unsigned long) mii_info->dev;
1839 mod_timer(&priv->phy_info_timer, jiffies +
1840 GFAR_PHY_CHANGE_TIME * HZ);
1841}
1842
1843/* Called every time the controller might need to be made 1608/* Called every time the controller might need to be made
1844 * aware of new link state. The PHY code conveys this 1609 * aware of new link state. The PHY code conveys this
1845 * information through variables in the priv structure, and this 1610 * information through variables in the phydev structure, and this
1846 * function converts those variables into the appropriate 1611 * function converts those variables into the appropriate
1847 * register values, and can bring down the device if needed. 1612 * register values, and can bring down the device if needed.
1848 */ 1613 */
@@ -1850,84 +1615,68 @@ static void adjust_link(struct net_device *dev)
1850{ 1615{
1851 struct gfar_private *priv = netdev_priv(dev); 1616 struct gfar_private *priv = netdev_priv(dev);
1852 struct gfar *regs = priv->regs; 1617 struct gfar *regs = priv->regs;
1853 u32 tempval; 1618 unsigned long flags;
1854 struct gfar_mii_info *mii_info = priv->mii_info; 1619 struct phy_device *phydev = priv->phydev;
1620 int new_state = 0;
1621
1622 spin_lock_irqsave(&priv->lock, flags);
1623 if (phydev->link) {
1624 u32 tempval = gfar_read(&regs->maccfg2);
1855 1625
1856 if (mii_info->link) {
1857 /* Now we make sure that we can be in full duplex mode. 1626 /* Now we make sure that we can be in full duplex mode.
1858 * If not, we operate in half-duplex mode. */ 1627 * If not, we operate in half-duplex mode. */
1859 if (mii_info->duplex != priv->oldduplex) { 1628 if (phydev->duplex != priv->oldduplex) {
1860 if (!(mii_info->duplex)) { 1629 new_state = 1;
1861 tempval = gfar_read(&regs->maccfg2); 1630 if (!(phydev->duplex))
1862 tempval &= ~(MACCFG2_FULL_DUPLEX); 1631 tempval &= ~(MACCFG2_FULL_DUPLEX);
1863 gfar_write(&regs->maccfg2, tempval); 1632 else
1864
1865 if (netif_msg_link(priv))
1866 printk(KERN_INFO "%s: Half Duplex\n",
1867 dev->name);
1868 } else {
1869 tempval = gfar_read(&regs->maccfg2);
1870 tempval |= MACCFG2_FULL_DUPLEX; 1633 tempval |= MACCFG2_FULL_DUPLEX;
1871 gfar_write(&regs->maccfg2, tempval);
1872 1634
1873 if (netif_msg_link(priv)) 1635 priv->oldduplex = phydev->duplex;
1874 printk(KERN_INFO "%s: Full Duplex\n",
1875 dev->name);
1876 }
1877
1878 priv->oldduplex = mii_info->duplex;
1879 } 1636 }
1880 1637
1881 if (mii_info->speed != priv->oldspeed) { 1638 if (phydev->speed != priv->oldspeed) {
1882 switch (mii_info->speed) { 1639 new_state = 1;
1640 switch (phydev->speed) {
1883 case 1000: 1641 case 1000:
1884 tempval = gfar_read(&regs->maccfg2);
1885 tempval = 1642 tempval =
1886 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); 1643 ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
1887 gfar_write(&regs->maccfg2, tempval);
1888 break; 1644 break;
1889 case 100: 1645 case 100:
1890 case 10: 1646 case 10:
1891 tempval = gfar_read(&regs->maccfg2);
1892 tempval = 1647 tempval =
1893 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); 1648 ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
1894 gfar_write(&regs->maccfg2, tempval);
1895 break; 1649 break;
1896 default: 1650 default:
1897 if (netif_msg_link(priv)) 1651 if (netif_msg_link(priv))
1898 printk(KERN_WARNING 1652 printk(KERN_WARNING
1899 "%s: Ack! Speed (%d) is not 10/100/1000!\n", 1653 "%s: Ack! Speed (%d) is not 10/100/1000!\n",
1900 dev->name, mii_info->speed); 1654 dev->name, phydev->speed);
1901 break; 1655 break;
1902 } 1656 }
1903 1657
1904 if (netif_msg_link(priv)) 1658 priv->oldspeed = phydev->speed;
1905 printk(KERN_INFO "%s: Speed %dBT\n", dev->name,
1906 mii_info->speed);
1907
1908 priv->oldspeed = mii_info->speed;
1909 } 1659 }
1910 1660
1661 gfar_write(&regs->maccfg2, tempval);
1662
1911 if (!priv->oldlink) { 1663 if (!priv->oldlink) {
1912 if (netif_msg_link(priv)) 1664 new_state = 1;
1913 printk(KERN_INFO "%s: Link is up\n", dev->name);
1914 priv->oldlink = 1; 1665 priv->oldlink = 1;
1915 netif_carrier_on(dev);
1916 netif_schedule(dev); 1666 netif_schedule(dev);
1917 } 1667 }
1918 } else { 1668 } else if (priv->oldlink) {
1919 if (priv->oldlink) { 1669 new_state = 1;
1920 if (netif_msg_link(priv)) 1670 priv->oldlink = 0;
1921 printk(KERN_INFO "%s: Link is down\n", 1671 priv->oldspeed = 0;
1922 dev->name); 1672 priv->oldduplex = -1;
1923 priv->oldlink = 0;
1924 priv->oldspeed = 0;
1925 priv->oldduplex = -1;
1926 netif_carrier_off(dev);
1927 }
1928 } 1673 }
1929}
1930 1674
1675 if (new_state && netif_msg_link(priv))
1676 phy_print_status(phydev);
1677
1678 spin_unlock_irqrestore(&priv->lock, flags);
1679}
1931 1680
1932/* Update the hash table based on the current list of multicast 1681/* Update the hash table based on the current list of multicast
1933 * addresses we subscribe to. Also, change the promiscuity of 1682 * addresses we subscribe to. Also, change the promiscuity of
@@ -2122,12 +1871,23 @@ static struct device_driver gfar_driver = {
2122 1871
2123static int __init gfar_init(void) 1872static int __init gfar_init(void)
2124{ 1873{
2125 return driver_register(&gfar_driver); 1874 int err = gfar_mdio_init();
1875
1876 if (err)
1877 return err;
1878
1879 err = driver_register(&gfar_driver);
1880
1881 if (err)
1882 gfar_mdio_exit();
1883
1884 return err;
2126} 1885}
2127 1886
2128static void __exit gfar_exit(void) 1887static void __exit gfar_exit(void)
2129{ 1888{
2130 driver_unregister(&gfar_driver); 1889 driver_unregister(&gfar_driver);
1890 gfar_mdio_exit();
2131} 1891}
2132 1892
2133module_init(gfar_init); 1893module_init(gfar_init);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 28af087d9fbb..c77ca6c0d04a 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -17,7 +17,6 @@
17 * 17 *
18 * Still left to do: 18 * Still left to do:
19 * -Add support for module parameters 19 * -Add support for module parameters
20 * -Add support for ethtool -s
21 * -Add patch for ethtool phys id 20 * -Add patch for ethtool phys id
22 */ 21 */
23#ifndef __GIANFAR_H 22#ifndef __GIANFAR_H
@@ -37,7 +36,8 @@
37#include <linux/skbuff.h> 36#include <linux/skbuff.h>
38#include <linux/spinlock.h> 37#include <linux/spinlock.h>
39#include <linux/mm.h> 38#include <linux/mm.h>
40#include <linux/fsl_devices.h> 39#include <linux/mii.h>
40#include <linux/phy.h>
41 41
42#include <asm/io.h> 42#include <asm/io.h>
43#include <asm/irq.h> 43#include <asm/irq.h>
@@ -48,7 +48,8 @@
48#include <linux/workqueue.h> 48#include <linux/workqueue.h>
49#include <linux/ethtool.h> 49#include <linux/ethtool.h>
50#include <linux/netdevice.h> 50#include <linux/netdevice.h>
51#include "gianfar_phy.h" 51#include <linux/fsl_devices.h>
52#include "gianfar_mii.h"
52 53
53/* The maximum number of packets to be handled in one call of gfar_poll */ 54/* The maximum number of packets to be handled in one call of gfar_poll */
54#define GFAR_DEV_WEIGHT 64 55#define GFAR_DEV_WEIGHT 64
@@ -73,7 +74,7 @@
73#define PHY_INIT_TIMEOUT 100000 74#define PHY_INIT_TIMEOUT 100000
74#define GFAR_PHY_CHANGE_TIME 2 75#define GFAR_PHY_CHANGE_TIME 2
75 76
76#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.1, " 77#define DEVICE_NAME "%s: Gianfar Ethernet Controller Version 1.2, "
77#define DRV_NAME "gfar-enet" 78#define DRV_NAME "gfar-enet"
78extern const char gfar_driver_name[]; 79extern const char gfar_driver_name[];
79extern const char gfar_driver_version[]; 80extern const char gfar_driver_version[];
@@ -578,12 +579,7 @@ struct gfar {
578 u32 hafdup; /* 0x.50c - Half Duplex Register */ 579 u32 hafdup; /* 0x.50c - Half Duplex Register */
579 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ 580 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
580 u8 res18[12]; 581 u8 res18[12];
581 u32 miimcfg; /* 0x.520 - MII Management Configuration Register */ 582 u8 gfar_mii_regs[24]; /* See gianfar_phy.h */
582 u32 miimcom; /* 0x.524 - MII Management Command Register */
583 u32 miimadd; /* 0x.528 - MII Management Address Register */
584 u32 miimcon; /* 0x.52c - MII Management Control Register */
585 u32 miimstat; /* 0x.530 - MII Management Status Register */
586 u32 miimind; /* 0x.534 - MII Management Indicator Register */
587 u8 res19[4]; 583 u8 res19[4];
588 u32 ifstat; /* 0x.53c - Interface Status Register */ 584 u32 ifstat; /* 0x.53c - Interface Status Register */
589 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ 585 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
@@ -688,9 +684,6 @@ struct gfar_private {
688 struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */ 684 struct gfar *regs; /* Pointer to the GFAR memory mapped Registers */
689 u32 *hash_regs[16]; 685 u32 *hash_regs[16];
690 int hash_width; 686 int hash_width;
691 struct gfar *phyregs;
692 struct work_struct tq;
693 struct timer_list phy_info_timer;
694 struct net_device_stats stats; /* linux network statistics */ 687 struct net_device_stats stats; /* linux network statistics */
695 struct gfar_extra_stats extra_stats; 688 struct gfar_extra_stats extra_stats;
696 spinlock_t lock; 689 spinlock_t lock;
@@ -710,7 +703,8 @@ struct gfar_private {
710 unsigned int interruptError; 703 unsigned int interruptError;
711 struct gianfar_platform_data *einfo; 704 struct gianfar_platform_data *einfo;
712 705
713 struct gfar_mii_info *mii_info; 706 struct phy_device *phydev;
707 struct mii_bus *mii_bus;
714 int oldspeed; 708 int oldspeed;
715 int oldduplex; 709 int oldduplex;
716 int oldlink; 710 int oldlink;
@@ -732,4 +726,12 @@ extern inline void gfar_write(volatile unsigned *addr, u32 val)
732 726
733extern struct ethtool_ops *gfar_op_array[]; 727extern struct ethtool_ops *gfar_op_array[];
734 728
729extern irqreturn_t gfar_receive(int irq, void *dev_id, struct pt_regs *regs);
730extern int startup_gfar(struct net_device *dev);
731extern void stop_gfar(struct net_device *dev);
732extern void gfar_halt(struct net_device *dev);
733extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
734 int enable, u32 regnum, u32 read);
735void gfar_setup_stashing(struct net_device *dev);
736
735#endif /* __GIANFAR_H */ 737#endif /* __GIANFAR_H */
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index a451de629197..68e3578e7613 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -39,17 +39,18 @@
39#include <asm/types.h> 39#include <asm/types.h>
40#include <asm/uaccess.h> 40#include <asm/uaccess.h>
41#include <linux/ethtool.h> 41#include <linux/ethtool.h>
42#include <linux/mii.h>
43#include <linux/phy.h>
42 44
43#include "gianfar.h" 45#include "gianfar.h"
44 46
45#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0)) 47#define is_power_of_2(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
46 48
47extern int startup_gfar(struct net_device *dev);
48extern void stop_gfar(struct net_device *dev);
49extern void gfar_halt(struct net_device *dev);
50extern void gfar_start(struct net_device *dev); 49extern void gfar_start(struct net_device *dev);
51extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 50extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
52 51
52#define GFAR_MAX_COAL_USECS 0xffff
53#define GFAR_MAX_COAL_FRAMES 0xff
53static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, 54static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
54 u64 * buf); 55 u64 * buf);
55static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf); 56static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
@@ -182,38 +183,32 @@ static void gfar_gdrvinfo(struct net_device *dev, struct
182 drvinfo->eedump_len = 0; 183 drvinfo->eedump_len = 0;
183} 184}
184 185
186
187static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
188{
189 struct gfar_private *priv = netdev_priv(dev);
190 struct phy_device *phydev = priv->phydev;
191
192 if (NULL == phydev)
193 return -ENODEV;
194
195 return phy_ethtool_sset(phydev, cmd);
196}
197
198
185/* Return the current settings in the ethtool_cmd structure */ 199/* Return the current settings in the ethtool_cmd structure */
186static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) 200static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
187{ 201{
188 struct gfar_private *priv = netdev_priv(dev); 202 struct gfar_private *priv = netdev_priv(dev);
189 uint gigabit_support = 203 struct phy_device *phydev = priv->phydev;
190 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 204
191 SUPPORTED_1000baseT_Full : 0; 205 if (NULL == phydev)
192 uint gigabit_advert = 206 return -ENODEV;
193 priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 207
194 ADVERTISED_1000baseT_Full: 0;
195
196 cmd->supported = (SUPPORTED_10baseT_Half
197 | SUPPORTED_100baseT_Half
198 | SUPPORTED_100baseT_Full
199 | gigabit_support | SUPPORTED_Autoneg);
200
201 /* For now, we always advertise everything */
202 cmd->advertising = (ADVERTISED_10baseT_Half
203 | ADVERTISED_100baseT_Half
204 | ADVERTISED_100baseT_Full
205 | gigabit_advert | ADVERTISED_Autoneg);
206
207 cmd->speed = priv->mii_info->speed;
208 cmd->duplex = priv->mii_info->duplex;
209 cmd->port = PORT_MII;
210 cmd->phy_address = priv->mii_info->mii_id;
211 cmd->transceiver = XCVR_EXTERNAL;
212 cmd->autoneg = AUTONEG_ENABLE;
213 cmd->maxtxpkt = priv->txcount; 208 cmd->maxtxpkt = priv->txcount;
214 cmd->maxrxpkt = priv->rxcount; 209 cmd->maxrxpkt = priv->rxcount;
215 210
216 return 0; 211 return phy_ethtool_gset(phydev, cmd);
217} 212}
218 213
219/* Return the length of the register structure */ 214/* Return the length of the register structure */
@@ -241,14 +236,14 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
241 unsigned int count; 236 unsigned int count;
242 237
243 /* The timer is different, depending on the interface speed */ 238 /* The timer is different, depending on the interface speed */
244 switch (priv->mii_info->speed) { 239 switch (priv->phydev->speed) {
245 case 1000: 240 case SPEED_1000:
246 count = GFAR_GBIT_TIME; 241 count = GFAR_GBIT_TIME;
247 break; 242 break;
248 case 100: 243 case SPEED_100:
249 count = GFAR_100_TIME; 244 count = GFAR_100_TIME;
250 break; 245 break;
251 case 10: 246 case SPEED_10:
252 default: 247 default:
253 count = GFAR_10_TIME; 248 count = GFAR_10_TIME;
254 break; 249 break;
@@ -265,14 +260,14 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
265 unsigned int count; 260 unsigned int count;
266 261
267 /* The timer is different, depending on the interface speed */ 262 /* The timer is different, depending on the interface speed */
268 switch (priv->mii_info->speed) { 263 switch (priv->phydev->speed) {
269 case 1000: 264 case SPEED_1000:
270 count = GFAR_GBIT_TIME; 265 count = GFAR_GBIT_TIME;
271 break; 266 break;
272 case 100: 267 case SPEED_100:
273 count = GFAR_100_TIME; 268 count = GFAR_100_TIME;
274 break; 269 break;
275 case 10: 270 case SPEED_10:
276 default: 271 default:
277 count = GFAR_10_TIME; 272 count = GFAR_10_TIME;
278 break; 273 break;
@@ -292,6 +287,9 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
292 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 287 if (!(priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
293 return -EOPNOTSUPP; 288 return -EOPNOTSUPP;
294 289
290 if (NULL == priv->phydev)
291 return -ENODEV;
292
295 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime); 293 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, priv->rxtime);
296 cvals->rx_max_coalesced_frames = priv->rxcount; 294 cvals->rx_max_coalesced_frames = priv->rxcount;
297 295
@@ -348,6 +346,22 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
348 else 346 else
349 priv->rxcoalescing = 1; 347 priv->rxcoalescing = 1;
350 348
349 if (NULL == priv->phydev)
350 return -ENODEV;
351
352 /* Check the bounds of the values */
353 if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
354 pr_info("Coalescing is limited to %d microseconds\n",
355 GFAR_MAX_COAL_USECS);
356 return -EINVAL;
357 }
358
359 if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
360 pr_info("Coalescing is limited to %d frames\n",
361 GFAR_MAX_COAL_FRAMES);
362 return -EINVAL;
363 }
364
351 priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs); 365 priv->rxtime = gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs);
352 priv->rxcount = cvals->rx_max_coalesced_frames; 366 priv->rxcount = cvals->rx_max_coalesced_frames;
353 367
@@ -358,6 +372,19 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
358 else 372 else
359 priv->txcoalescing = 1; 373 priv->txcoalescing = 1;
360 374
375 /* Check the bounds of the values */
376 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
377 pr_info("Coalescing is limited to %d microseconds\n",
378 GFAR_MAX_COAL_USECS);
379 return -EINVAL;
380 }
381
382 if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
383 pr_info("Coalescing is limited to %d frames\n",
384 GFAR_MAX_COAL_FRAMES);
385 return -EINVAL;
386 }
387
361 priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs); 388 priv->txtime = gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs);
362 priv->txcount = cvals->tx_max_coalesced_frames; 389 priv->txcount = cvals->tx_max_coalesced_frames;
363 390
@@ -536,6 +563,7 @@ static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
536 563
537struct ethtool_ops gfar_ethtool_ops = { 564struct ethtool_ops gfar_ethtool_ops = {
538 .get_settings = gfar_gsettings, 565 .get_settings = gfar_gsettings,
566 .set_settings = gfar_ssettings,
539 .get_drvinfo = gfar_gdrvinfo, 567 .get_drvinfo = gfar_gdrvinfo,
540 .get_regs_len = gfar_reglen, 568 .get_regs_len = gfar_reglen,
541 .get_regs = gfar_get_regs, 569 .get_regs = gfar_get_regs,
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
new file mode 100644
index 000000000000..1eca1dbca7f1
--- /dev/null
+++ b/drivers/net/gianfar_mii.c
@@ -0,0 +1,219 @@
1/*
2 * drivers/net/gianfar_mii.c
3 *
4 * Gianfar Ethernet Driver -- MIIM bus implementation
5 * Provides Bus interface for MIIM regs
6 *
7 * Author: Andy Fleming
8 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
9 *
10 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/sched.h>
22#include <linux/string.h>
23#include <linux/errno.h>
24#include <linux/unistd.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/version.h>
36#include <asm/ocp.h>
37#include <linux/crc32.h>
38#include <linux/mii.h>
39#include <linux/phy.h>
40
41#include <asm/io.h>
42#include <asm/irq.h>
43#include <asm/uaccess.h>
44
45#include "gianfar.h"
46#include "gianfar_mii.h"
47
48/* Write value to the PHY at mii_id at register regnum,
49 * on the bus, waiting until the write is done before returning.
50 * All PHY configuration is done through the TSEC1 MIIM regs */
51int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
52{
53 struct gfar_mii *regs = bus->priv;
54
55 /* Set the PHY address and the register address we want to write */
56 gfar_write(&regs->miimadd, (mii_id << 8) | regnum);
57
58 /* Write out the value we want */
59 gfar_write(&regs->miimcon, value);
60
61 /* Wait for the transaction to finish */
62 while (gfar_read(&regs->miimind) & MIIMIND_BUSY)
63 cpu_relax();
64
65 return 0;
66}
67
68/* Read the bus for PHY at addr mii_id, register regnum, and
69 * return the value. Clears miimcom first. All PHY
70 * configuration has to be done through the TSEC1 MIIM regs */
71int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
72{
73 struct gfar_mii *regs = bus->priv;
74 u16 value;
75
76 /* Set the PHY address and the register address we want to read */
77 gfar_write(&regs->miimadd, (mii_id << 8) | regnum);
78
79 /* Clear miimcom, and then initiate a read */
80 gfar_write(&regs->miimcom, 0);
81 gfar_write(&regs->miimcom, MII_READ_COMMAND);
82
83 /* Wait for the transaction to finish */
84 while (gfar_read(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
85 cpu_relax();
86
87 /* Grab the value of the register from miimstat */
88 value = gfar_read(&regs->miimstat);
89
90 return value;
91}
92
93
94/* Reset the MIIM registers, and wait for the bus to free */
95int gfar_mdio_reset(struct mii_bus *bus)
96{
97 struct gfar_mii *regs = bus->priv;
98 unsigned int timeout = PHY_INIT_TIMEOUT;
99
100 spin_lock_bh(&bus->mdio_lock);
101
102 /* Reset the management interface */
103 gfar_write(&regs->miimcfg, MIIMCFG_RESET);
104
105 /* Setup the MII Mgmt clock speed */
106 gfar_write(&regs->miimcfg, MIIMCFG_INIT_VALUE);
107
108 /* Wait until the bus is free */
109 while ((gfar_read(&regs->miimind) & MIIMIND_BUSY) &&
110 timeout--)
111 cpu_relax();
112
113 spin_unlock_bh(&bus->mdio_lock);
114
115 if(timeout <= 0) {
116 printk(KERN_ERR "%s: The MII Bus is stuck!\n",
117 bus->name);
118 return -EBUSY;
119 }
120
121 return 0;
122}
123
124
125int gfar_mdio_probe(struct device *dev)
126{
127 struct platform_device *pdev = to_platform_device(dev);
128 struct gianfar_mdio_data *pdata;
129 struct gfar_mii *regs;
130 struct mii_bus *new_bus;
131 int err = 0;
132
133 if (NULL == dev)
134 return -EINVAL;
135
136 new_bus = kmalloc(sizeof(struct mii_bus), GFP_KERNEL);
137
138 if (NULL == new_bus)
139 return -ENOMEM;
140
141 new_bus->name = "Gianfar MII Bus",
142 new_bus->read = &gfar_mdio_read,
143 new_bus->write = &gfar_mdio_write,
144 new_bus->reset = &gfar_mdio_reset,
145 new_bus->id = pdev->id;
146
147 pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data;
148
149 if (NULL == pdata) {
150 printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id);
151 return -ENODEV;
152 }
153
154 /* Set the PHY base address */
155 regs = (struct gfar_mii *) ioremap(pdata->paddr,
156 sizeof (struct gfar_mii));
157
158 if (NULL == regs) {
159 err = -ENOMEM;
160 goto reg_map_fail;
161 }
162
163 new_bus->priv = regs;
164
165 new_bus->irq = pdata->irq;
166
167 new_bus->dev = dev;
168 dev_set_drvdata(dev, new_bus);
169
170 err = mdiobus_register(new_bus);
171
172 if (0 != err) {
173 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
174 new_bus->name);
175 goto bus_register_fail;
176 }
177
178 return 0;
179
180bus_register_fail:
181 iounmap((void *) regs);
182reg_map_fail:
183 kfree(new_bus);
184
185 return err;
186}
187
188
189int gfar_mdio_remove(struct device *dev)
190{
191 struct mii_bus *bus = dev_get_drvdata(dev);
192
193 mdiobus_unregister(bus);
194
195 dev_set_drvdata(dev, NULL);
196
197 iounmap((void *) (&bus->priv));
198 bus->priv = NULL;
199 kfree(bus);
200
201 return 0;
202}
203
204static struct device_driver gianfar_mdio_driver = {
205 .name = "fsl-gianfar_mdio",
206 .bus = &platform_bus_type,
207 .probe = gfar_mdio_probe,
208 .remove = gfar_mdio_remove,
209};
210
211int __init gfar_mdio_init(void)
212{
213 return driver_register(&gianfar_mdio_driver);
214}
215
216void __exit gfar_mdio_exit(void)
217{
218 driver_unregister(&gianfar_mdio_driver);
219}
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
new file mode 100644
index 000000000000..56e5665d5c9b
--- /dev/null
+++ b/drivers/net/gianfar_mii.h
@@ -0,0 +1,45 @@
1/*
2 * drivers/net/gianfar_mii.h
3 *
4 * Gianfar Ethernet Driver -- MII Management Bus Implementation
5 * Driver for the MDIO bus controller in the Gianfar register space
6 *
7 * Author: Andy Fleming
8 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
9 *
10 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18#ifndef __GIANFAR_MII_H
19#define __GIANFAR_MII_H
20
21#define MIIMIND_BUSY 0x00000001
22#define MIIMIND_NOTVALID 0x00000004
23
24#define MII_READ_COMMAND 0x00000001
25
26#define GFAR_SUPPORTED (SUPPORTED_10baseT_Half \
27 | SUPPORTED_100baseT_Half \
28 | SUPPORTED_100baseT_Full \
29 | SUPPORTED_Autoneg \
30 | SUPPORTED_MII)
31
32struct gfar_mii {
33 u32 miimcfg; /* 0x.520 - MII Management Config Register */
34 u32 miimcom; /* 0x.524 - MII Management Command Register */
35 u32 miimadd; /* 0x.528 - MII Management Address Register */
36 u32 miimcon; /* 0x.52c - MII Management Control Register */
37 u32 miimstat; /* 0x.530 - MII Management Status Register */
38 u32 miimind; /* 0x.534 - MII Management Indicator Register */
39};
40
41int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
42int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
43int __init gfar_mdio_init(void);
44void __exit gfar_mdio_exit(void);
45#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/gianfar_phy.c b/drivers/net/gianfar_phy.c
deleted file mode 100644
index 7c965f268a82..000000000000
--- a/drivers/net/gianfar_phy.c
+++ /dev/null
@@ -1,661 +0,0 @@
1/*
2 * drivers/net/gianfar_phy.c
3 *
4 * Gianfar Ethernet Driver -- PHY handling
5 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
6 * Based on 8260_io/fcc_enet.c
7 *
8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
10 *
11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mm.h>
34
35#include <asm/io.h>
36#include <asm/irq.h>
37#include <asm/uaccess.h>
38#include <linux/module.h>
39#include <linux/version.h>
40#include <linux/crc32.h>
41#include <linux/mii.h>
42
43#include "gianfar.h"
44#include "gianfar_phy.h"
45
46static void config_genmii_advert(struct gfar_mii_info *mii_info);
47static void genmii_setup_forced(struct gfar_mii_info *mii_info);
48static void genmii_restart_aneg(struct gfar_mii_info *mii_info);
49static int gbit_config_aneg(struct gfar_mii_info *mii_info);
50static int genmii_config_aneg(struct gfar_mii_info *mii_info);
51static int genmii_update_link(struct gfar_mii_info *mii_info);
52static int genmii_read_status(struct gfar_mii_info *mii_info);
53u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum);
54void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val);
55
56/* Write value to the PHY for this device to the register at regnum, */
57/* waiting until the write is done before it returns. All PHY */
58/* configuration has to be done through the TSEC1 MIIM regs */
59void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
60{
61 struct gfar_private *priv = netdev_priv(dev);
62 struct gfar *regbase = priv->phyregs;
63
64 /* Set the PHY address and the register address we want to write */
65 gfar_write(&regbase->miimadd, (mii_id << 8) | regnum);
66
67 /* Write out the value we want */
68 gfar_write(&regbase->miimcon, value);
69
70 /* Wait for the transaction to finish */
71 while (gfar_read(&regbase->miimind) & MIIMIND_BUSY)
72 cpu_relax();
73}
74
75/* Reads from register regnum in the PHY for device dev, */
76/* returning the value. Clears miimcom first. All PHY */
77/* configuration has to be done through the TSEC1 MIIM regs */
78int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
79{
80 struct gfar_private *priv = netdev_priv(dev);
81 struct gfar *regbase = priv->phyregs;
82 u16 value;
83
84 /* Set the PHY address and the register address we want to read */
85 gfar_write(&regbase->miimadd, (mii_id << 8) | regnum);
86
87 /* Clear miimcom, and then initiate a read */
88 gfar_write(&regbase->miimcom, 0);
89 gfar_write(&regbase->miimcom, MII_READ_COMMAND);
90
91 /* Wait for the transaction to finish */
92 while (gfar_read(&regbase->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
93 cpu_relax();
94
95 /* Grab the value of the register from miimstat */
96 value = gfar_read(&regbase->miimstat);
97
98 return value;
99}
100
101void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info)
102{
103 if(mii_info->phyinfo->ack_interrupt)
104 mii_info->phyinfo->ack_interrupt(mii_info);
105}
106
107
108void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts)
109{
110 mii_info->interrupts = interrupts;
111 if(mii_info->phyinfo->config_intr)
112 mii_info->phyinfo->config_intr(mii_info);
113}
114
115
116/* Writes MII_ADVERTISE with the appropriate values, after
117 * sanitizing advertise to make sure only supported features
118 * are advertised
119 */
120static void config_genmii_advert(struct gfar_mii_info *mii_info)
121{
122 u32 advertise;
123 u16 adv;
124
125 /* Only allow advertising what this PHY supports */
126 mii_info->advertising &= mii_info->phyinfo->features;
127 advertise = mii_info->advertising;
128
129 /* Setup standard advertisement */
130 adv = phy_read(mii_info, MII_ADVERTISE);
131 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
132 if (advertise & ADVERTISED_10baseT_Half)
133 adv |= ADVERTISE_10HALF;
134 if (advertise & ADVERTISED_10baseT_Full)
135 adv |= ADVERTISE_10FULL;
136 if (advertise & ADVERTISED_100baseT_Half)
137 adv |= ADVERTISE_100HALF;
138 if (advertise & ADVERTISED_100baseT_Full)
139 adv |= ADVERTISE_100FULL;
140 phy_write(mii_info, MII_ADVERTISE, adv);
141}
142
143static void genmii_setup_forced(struct gfar_mii_info *mii_info)
144{
145 u16 ctrl;
146 u32 features = mii_info->phyinfo->features;
147
148 ctrl = phy_read(mii_info, MII_BMCR);
149
150 ctrl &= ~(BMCR_FULLDPLX|BMCR_SPEED100|BMCR_SPEED1000|BMCR_ANENABLE);
151 ctrl |= BMCR_RESET;
152
153 switch(mii_info->speed) {
154 case SPEED_1000:
155 if(features & (SUPPORTED_1000baseT_Half
156 | SUPPORTED_1000baseT_Full)) {
157 ctrl |= BMCR_SPEED1000;
158 break;
159 }
160 mii_info->speed = SPEED_100;
161 case SPEED_100:
162 if (features & (SUPPORTED_100baseT_Half
163 | SUPPORTED_100baseT_Full)) {
164 ctrl |= BMCR_SPEED100;
165 break;
166 }
167 mii_info->speed = SPEED_10;
168 case SPEED_10:
169 if (features & (SUPPORTED_10baseT_Half
170 | SUPPORTED_10baseT_Full))
171 break;
172 default: /* Unsupported speed! */
173 printk(KERN_ERR "%s: Bad speed!\n",
174 mii_info->dev->name);
175 break;
176 }
177
178 phy_write(mii_info, MII_BMCR, ctrl);
179}
180
181
182/* Enable and Restart Autonegotiation */
183static void genmii_restart_aneg(struct gfar_mii_info *mii_info)
184{
185 u16 ctl;
186
187 ctl = phy_read(mii_info, MII_BMCR);
188 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
189 phy_write(mii_info, MII_BMCR, ctl);
190}
191
192
193static int gbit_config_aneg(struct gfar_mii_info *mii_info)
194{
195 u16 adv;
196 u32 advertise;
197
198 if(mii_info->autoneg) {
199 /* Configure the ADVERTISE register */
200 config_genmii_advert(mii_info);
201 advertise = mii_info->advertising;
202
203 adv = phy_read(mii_info, MII_1000BASETCONTROL);
204 adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
205 MII_1000BASETCONTROL_HALFDUPLEXCAP);
206 if (advertise & SUPPORTED_1000baseT_Half)
207 adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
208 if (advertise & SUPPORTED_1000baseT_Full)
209 adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
210 phy_write(mii_info, MII_1000BASETCONTROL, adv);
211
212 /* Start/Restart aneg */
213 genmii_restart_aneg(mii_info);
214 } else
215 genmii_setup_forced(mii_info);
216
217 return 0;
218}
219
220static int marvell_config_aneg(struct gfar_mii_info *mii_info)
221{
222 /* The Marvell PHY has an errata which requires
223 * that certain registers get written in order
224 * to restart autonegotiation */
225 phy_write(mii_info, MII_BMCR, BMCR_RESET);
226
227 phy_write(mii_info, 0x1d, 0x1f);
228 phy_write(mii_info, 0x1e, 0x200c);
229 phy_write(mii_info, 0x1d, 0x5);
230 phy_write(mii_info, 0x1e, 0);
231 phy_write(mii_info, 0x1e, 0x100);
232
233 gbit_config_aneg(mii_info);
234
235 return 0;
236}
237static int genmii_config_aneg(struct gfar_mii_info *mii_info)
238{
239 if (mii_info->autoneg) {
240 config_genmii_advert(mii_info);
241 genmii_restart_aneg(mii_info);
242 } else
243 genmii_setup_forced(mii_info);
244
245 return 0;
246}
247
248
249static int genmii_update_link(struct gfar_mii_info *mii_info)
250{
251 u16 status;
252
253 /* Do a fake read */
254 phy_read(mii_info, MII_BMSR);
255
256 /* Read link and autonegotiation status */
257 status = phy_read(mii_info, MII_BMSR);
258 if ((status & BMSR_LSTATUS) == 0)
259 mii_info->link = 0;
260 else
261 mii_info->link = 1;
262
263 /* If we are autonegotiating, and not done,
264 * return an error */
265 if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
266 return -EAGAIN;
267
268 return 0;
269}
270
271static int genmii_read_status(struct gfar_mii_info *mii_info)
272{
273 u16 status;
274 int err;
275
276 /* Update the link, but return if there
277 * was an error */
278 err = genmii_update_link(mii_info);
279 if (err)
280 return err;
281
282 if (mii_info->autoneg) {
283 status = phy_read(mii_info, MII_LPA);
284
285 if (status & (LPA_10FULL | LPA_100FULL))
286 mii_info->duplex = DUPLEX_FULL;
287 else
288 mii_info->duplex = DUPLEX_HALF;
289 if (status & (LPA_100FULL | LPA_100HALF))
290 mii_info->speed = SPEED_100;
291 else
292 mii_info->speed = SPEED_10;
293 mii_info->pause = 0;
294 }
295 /* On non-aneg, we assume what we put in BMCR is the speed,
296 * though magic-aneg shouldn't prevent this case from occurring
297 */
298
299 return 0;
300}
301static int marvell_read_status(struct gfar_mii_info *mii_info)
302{
303 u16 status;
304 int err;
305
306 /* Update the link, but return if there
307 * was an error */
308 err = genmii_update_link(mii_info);
309 if (err)
310 return err;
311
312 /* If the link is up, read the speed and duplex */
313 /* If we aren't autonegotiating, assume speeds
314 * are as set */
315 if (mii_info->autoneg && mii_info->link) {
316 int speed;
317 status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
318
319#if 0
320 /* If speed and duplex aren't resolved,
321 * return an error. Isn't this handled
322 * by checking aneg?
323 */
324 if ((status & MII_M1011_PHY_SPEC_STATUS_RESOLVED) == 0)
325 return -EAGAIN;
326#endif
327
328 /* Get the duplexity */
329 if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
330 mii_info->duplex = DUPLEX_FULL;
331 else
332 mii_info->duplex = DUPLEX_HALF;
333
334 /* Get the speed */
335 speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
336 switch(speed) {
337 case MII_M1011_PHY_SPEC_STATUS_1000:
338 mii_info->speed = SPEED_1000;
339 break;
340 case MII_M1011_PHY_SPEC_STATUS_100:
341 mii_info->speed = SPEED_100;
342 break;
343 default:
344 mii_info->speed = SPEED_10;
345 break;
346 }
347 mii_info->pause = 0;
348 }
349
350 return 0;
351}
352
353
354static int cis820x_read_status(struct gfar_mii_info *mii_info)
355{
356 u16 status;
357 int err;
358
359 /* Update the link, but return if there
360 * was an error */
361 err = genmii_update_link(mii_info);
362 if (err)
363 return err;
364
365 /* If the link is up, read the speed and duplex */
366 /* If we aren't autonegotiating, assume speeds
367 * are as set */
368 if (mii_info->autoneg && mii_info->link) {
369 int speed;
370
371 status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
372 if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
373 mii_info->duplex = DUPLEX_FULL;
374 else
375 mii_info->duplex = DUPLEX_HALF;
376
377 speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
378
379 switch (speed) {
380 case MII_CIS8201_AUXCONSTAT_GBIT:
381 mii_info->speed = SPEED_1000;
382 break;
383 case MII_CIS8201_AUXCONSTAT_100:
384 mii_info->speed = SPEED_100;
385 break;
386 default:
387 mii_info->speed = SPEED_10;
388 break;
389 }
390 }
391
392 return 0;
393}
394
395static int marvell_ack_interrupt(struct gfar_mii_info *mii_info)
396{
397 /* Clear the interrupts by reading the reg */
398 phy_read(mii_info, MII_M1011_IEVENT);
399
400 return 0;
401}
402
403static int marvell_config_intr(struct gfar_mii_info *mii_info)
404{
405 if(mii_info->interrupts == MII_INTERRUPT_ENABLED)
406 phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
407 else
408 phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
409
410 return 0;
411}
412
413static int cis820x_init(struct gfar_mii_info *mii_info)
414{
415 phy_write(mii_info, MII_CIS8201_AUX_CONSTAT,
416 MII_CIS8201_AUXCONSTAT_INIT);
417 phy_write(mii_info, MII_CIS8201_EXT_CON1,
418 MII_CIS8201_EXTCON1_INIT);
419
420 return 0;
421}
422
423static int cis820x_ack_interrupt(struct gfar_mii_info *mii_info)
424{
425 phy_read(mii_info, MII_CIS8201_ISTAT);
426
427 return 0;
428}
429
430static int cis820x_config_intr(struct gfar_mii_info *mii_info)
431{
432 if(mii_info->interrupts == MII_INTERRUPT_ENABLED)
433 phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
434 else
435 phy_write(mii_info, MII_CIS8201_IMASK, 0);
436
437 return 0;
438}
439
440#define DM9161_DELAY 10
441
442static int dm9161_read_status(struct gfar_mii_info *mii_info)
443{
444 u16 status;
445 int err;
446
447 /* Update the link, but return if there
448 * was an error */
449 err = genmii_update_link(mii_info);
450 if (err)
451 return err;
452
453 /* If the link is up, read the speed and duplex */
454 /* If we aren't autonegotiating, assume speeds
455 * are as set */
456 if (mii_info->autoneg && mii_info->link) {
457 status = phy_read(mii_info, MII_DM9161_SCSR);
458 if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
459 mii_info->speed = SPEED_100;
460 else
461 mii_info->speed = SPEED_10;
462
463 if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
464 mii_info->duplex = DUPLEX_FULL;
465 else
466 mii_info->duplex = DUPLEX_HALF;
467 }
468
469 return 0;
470}
471
472
473static int dm9161_config_aneg(struct gfar_mii_info *mii_info)
474{
475 struct dm9161_private *priv = mii_info->priv;
476
477 if(0 == priv->resetdone)
478 return -EAGAIN;
479
480 return 0;
481}
482
483static void dm9161_timer(unsigned long data)
484{
485 struct gfar_mii_info *mii_info = (struct gfar_mii_info *)data;
486 struct dm9161_private *priv = mii_info->priv;
487 u16 status = phy_read(mii_info, MII_BMSR);
488
489 if (status & BMSR_ANEGCOMPLETE) {
490 priv->resetdone = 1;
491 } else
492 mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
493}
494
495static int dm9161_init(struct gfar_mii_info *mii_info)
496{
497 struct dm9161_private *priv;
498
499 /* Allocate the private data structure */
500 priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
501
502 if (NULL == priv)
503 return -ENOMEM;
504
505 mii_info->priv = priv;
506
507 /* Reset is not done yet */
508 priv->resetdone = 0;
509
510 /* Isolate the PHY */
511 phy_write(mii_info, MII_BMCR, BMCR_ISOLATE);
512
513 /* Do not bypass the scrambler/descrambler */
514 phy_write(mii_info, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
515
516 /* Clear 10BTCSR to default */
517 phy_write(mii_info, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
518
519 /* Reconnect the PHY, and enable Autonegotiation */
520 phy_write(mii_info, MII_BMCR, BMCR_ANENABLE);
521
522 /* Start a timer for DM9161_DELAY seconds to wait
523 * for the PHY to be ready */
524 init_timer(&priv->timer);
525 priv->timer.function = &dm9161_timer;
526 priv->timer.data = (unsigned long) mii_info;
527 mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
528
529 return 0;
530}
531
532static void dm9161_close(struct gfar_mii_info *mii_info)
533{
534 struct dm9161_private *priv = mii_info->priv;
535
536 del_timer_sync(&priv->timer);
537 kfree(priv);
538}
539
540#if 0
541static int dm9161_ack_interrupt(struct gfar_mii_info *mii_info)
542{
543 phy_read(mii_info, MII_DM9161_INTR);
544
545 return 0;
546}
547#endif
548
549/* Cicada 820x */
550static struct phy_info phy_info_cis820x = {
551 0x000fc440,
552 "Cicada Cis8204",
553 0x000fffc0,
554 .features = MII_GBIT_FEATURES,
555 .init = &cis820x_init,
556 .config_aneg = &gbit_config_aneg,
557 .read_status = &cis820x_read_status,
558 .ack_interrupt = &cis820x_ack_interrupt,
559 .config_intr = &cis820x_config_intr,
560};
561
562static struct phy_info phy_info_dm9161 = {
563 .phy_id = 0x0181b880,
564 .name = "Davicom DM9161E",
565 .phy_id_mask = 0x0ffffff0,
566 .init = dm9161_init,
567 .config_aneg = dm9161_config_aneg,
568 .read_status = dm9161_read_status,
569 .close = dm9161_close,
570};
571
572static struct phy_info phy_info_marvell = {
573 .phy_id = 0x01410c00,
574 .phy_id_mask = 0xffffff00,
575 .name = "Marvell 88E1101/88E1111",
576 .features = MII_GBIT_FEATURES,
577 .config_aneg = &marvell_config_aneg,
578 .read_status = &marvell_read_status,
579 .ack_interrupt = &marvell_ack_interrupt,
580 .config_intr = &marvell_config_intr,
581};
582
583static struct phy_info phy_info_genmii= {
584 .phy_id = 0x00000000,
585 .phy_id_mask = 0x00000000,
586 .name = "Generic MII",
587 .features = MII_BASIC_FEATURES,
588 .config_aneg = genmii_config_aneg,
589 .read_status = genmii_read_status,
590};
591
592static struct phy_info *phy_info[] = {
593 &phy_info_cis820x,
594 &phy_info_marvell,
595 &phy_info_dm9161,
596 &phy_info_genmii,
597 NULL
598};
599
600u16 phy_read(struct gfar_mii_info *mii_info, u16 regnum)
601{
602 u16 retval;
603 unsigned long flags;
604
605 spin_lock_irqsave(&mii_info->mdio_lock, flags);
606 retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
607 spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
608
609 return retval;
610}
611
612void phy_write(struct gfar_mii_info *mii_info, u16 regnum, u16 val)
613{
614 unsigned long flags;
615
616 spin_lock_irqsave(&mii_info->mdio_lock, flags);
617 mii_info->mdio_write(mii_info->dev,
618 mii_info->mii_id,
619 regnum, val);
620 spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
621}
622
623/* Use the PHY ID registers to determine what type of PHY is attached
624 * to device dev. return a struct phy_info structure describing that PHY
625 */
626struct phy_info * get_phy_info(struct gfar_mii_info *mii_info)
627{
628 u16 phy_reg;
629 u32 phy_ID;
630 int i;
631 struct phy_info *theInfo = NULL;
632 struct net_device *dev = mii_info->dev;
633
634 /* Grab the bits from PHYIR1, and put them in the upper half */
635 phy_reg = phy_read(mii_info, MII_PHYSID1);
636 phy_ID = (phy_reg & 0xffff) << 16;
637
638 /* Grab the bits from PHYIR2, and put them in the lower half */
639 phy_reg = phy_read(mii_info, MII_PHYSID2);
640 phy_ID |= (phy_reg & 0xffff);
641
642 /* loop through all the known PHY types, and find one that */
643 /* matches the ID we read from the PHY. */
644 for (i = 0; phy_info[i]; i++)
645 if (phy_info[i]->phy_id ==
646 (phy_ID & phy_info[i]->phy_id_mask)) {
647 theInfo = phy_info[i];
648 break;
649 }
650
651 /* This shouldn't happen, as we have generic PHY support */
652 if (theInfo == NULL) {
653 printk("%s: PHY id %x is not supported!\n", dev->name, phy_ID);
654 return NULL;
655 } else {
656 printk("%s: PHY is %s (%x)\n", dev->name, theInfo->name,
657 phy_ID);
658 }
659
660 return theInfo;
661}
diff --git a/drivers/net/gianfar_phy.h b/drivers/net/gianfar_phy.h
deleted file mode 100644
index 1e9b3abf1e6d..000000000000
--- a/drivers/net/gianfar_phy.h
+++ /dev/null
@@ -1,213 +0,0 @@
1/*
2 * drivers/net/gianfar_phy.h
3 *
4 * Gianfar Ethernet Driver -- PHY handling
5 * Driver for FEC on MPC8540 and TSEC on MPC8540/MPC8560
6 * Based on 8260_io/fcc_enet.c
7 *
8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala (kumar.gala@freescale.com)
10 *
11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#ifndef __GIANFAR_PHY_H
20#define __GIANFAR_PHY_H
21
22#define MII_end ((u32)-2)
23#define MII_read ((u32)-1)
24
25#define MIIMIND_BUSY 0x00000001
26#define MIIMIND_NOTVALID 0x00000004
27
28#define GFAR_AN_TIMEOUT 2000
29
30/* 1000BT control (Marvell & BCM54xx at least) */
31#define MII_1000BASETCONTROL 0x09
32#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
33#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
34
35/* Cicada Extended Control Register 1 */
36#define MII_CIS8201_EXT_CON1 0x17
37#define MII_CIS8201_EXTCON1_INIT 0x0000
38
39/* Cicada Interrupt Mask Register */
40#define MII_CIS8201_IMASK 0x19
41#define MII_CIS8201_IMASK_IEN 0x8000
42#define MII_CIS8201_IMASK_SPEED 0x4000
43#define MII_CIS8201_IMASK_LINK 0x2000
44#define MII_CIS8201_IMASK_DUPLEX 0x1000
45#define MII_CIS8201_IMASK_MASK 0xf000
46
47/* Cicada Interrupt Status Register */
48#define MII_CIS8201_ISTAT 0x1a
49#define MII_CIS8201_ISTAT_STATUS 0x8000
50#define MII_CIS8201_ISTAT_SPEED 0x4000
51#define MII_CIS8201_ISTAT_LINK 0x2000
52#define MII_CIS8201_ISTAT_DUPLEX 0x1000
53
54/* Cicada Auxiliary Control/Status Register */
55#define MII_CIS8201_AUX_CONSTAT 0x1c
56#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
57#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
58#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
59#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
60#define MII_CIS8201_AUXCONSTAT_100 0x0008
61
62/* 88E1011 PHY Status Register */
63#define MII_M1011_PHY_SPEC_STATUS 0x11
64#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
65#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
66#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
67#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
68#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
69#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
70
71#define MII_M1011_IEVENT 0x13
72#define MII_M1011_IEVENT_CLEAR 0x0000
73
74#define MII_M1011_IMASK 0x12
75#define MII_M1011_IMASK_INIT 0x6400
76#define MII_M1011_IMASK_CLEAR 0x0000
77
78#define MII_DM9161_SCR 0x10
79#define MII_DM9161_SCR_INIT 0x0610
80
81/* DM9161 Specified Configuration and Status Register */
82#define MII_DM9161_SCSR 0x11
83#define MII_DM9161_SCSR_100F 0x8000
84#define MII_DM9161_SCSR_100H 0x4000
85#define MII_DM9161_SCSR_10F 0x2000
86#define MII_DM9161_SCSR_10H 0x1000
87
88/* DM9161 Interrupt Register */
89#define MII_DM9161_INTR 0x15
90#define MII_DM9161_INTR_PEND 0x8000
91#define MII_DM9161_INTR_DPLX_MASK 0x0800
92#define MII_DM9161_INTR_SPD_MASK 0x0400
93#define MII_DM9161_INTR_LINK_MASK 0x0200
94#define MII_DM9161_INTR_MASK 0x0100
95#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
96#define MII_DM9161_INTR_SPD_CHANGE 0x0008
97#define MII_DM9161_INTR_LINK_CHANGE 0x0004
98#define MII_DM9161_INTR_INIT 0x0000
99#define MII_DM9161_INTR_STOP \
100(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
101 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
102
103/* DM9161 10BT Configuration/Status */
104#define MII_DM9161_10BTCSR 0x12
105#define MII_DM9161_10BTCSR_INIT 0x7800
106
107#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
108 SUPPORTED_10baseT_Full | \
109 SUPPORTED_100baseT_Half | \
110 SUPPORTED_100baseT_Full | \
111 SUPPORTED_Autoneg | \
112 SUPPORTED_TP | \
113 SUPPORTED_MII)
114
115#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
116 SUPPORTED_1000baseT_Half | \
117 SUPPORTED_1000baseT_Full)
118
119#define MII_READ_COMMAND 0x00000001
120
121#define MII_INTERRUPT_DISABLED 0x0
122#define MII_INTERRUPT_ENABLED 0x1
123/* Taken from mii_if_info and sungem_phy.h */
124struct gfar_mii_info {
125 /* Information about the PHY type */
126 /* And management functions */
127 struct phy_info *phyinfo;
128
129 /* forced speed & duplex (no autoneg)
130 * partner speed & duplex & pause (autoneg)
131 */
132 int speed;
133 int duplex;
134 int pause;
135
136 /* The most recently read link state */
137 int link;
138
139 /* Enabled Interrupts */
140 u32 interrupts;
141
142 u32 advertising;
143 int autoneg;
144 int mii_id;
145
146 /* private data pointer */
147 /* For use by PHYs to maintain extra state */
148 void *priv;
149
150 /* Provided by host chip */
151 struct net_device *dev;
152
153 /* A lock to ensure that only one thing can read/write
154 * the MDIO bus at a time */
155 spinlock_t mdio_lock;
156
157 /* Provided by ethernet driver */
158 int (*mdio_read) (struct net_device *dev, int mii_id, int reg);
159 void (*mdio_write) (struct net_device *dev, int mii_id, int reg, int val);
160};
161
162/* struct phy_info: a structure which defines attributes for a PHY
163 *
164 * id will contain a number which represents the PHY. During
165 * startup, the driver will poll the PHY to find out what its
166 * UID--as defined by registers 2 and 3--is. The 32-bit result
167 * gotten from the PHY will be ANDed with phy_id_mask to
168 * discard any bits which may change based on revision numbers
169 * unimportant to functionality
170 *
171 * There are 6 commands which take a gfar_mii_info structure.
172 * Each PHY must declare config_aneg, and read_status.
173 */
174struct phy_info {
175 u32 phy_id;
176 char *name;
177 unsigned int phy_id_mask;
178 u32 features;
179
180 /* Called to initialize the PHY */
181 int (*init)(struct gfar_mii_info *mii_info);
182
183 /* Called to suspend the PHY for power */
184 int (*suspend)(struct gfar_mii_info *mii_info);
185
186 /* Reconfigures autonegotiation (or disables it) */
187 int (*config_aneg)(struct gfar_mii_info *mii_info);
188
189 /* Determines the negotiated speed and duplex */
190 int (*read_status)(struct gfar_mii_info *mii_info);
191
192 /* Clears any pending interrupts */
193 int (*ack_interrupt)(struct gfar_mii_info *mii_info);
194
195 /* Enables or disables interrupts */
196 int (*config_intr)(struct gfar_mii_info *mii_info);
197
198 /* Clears up any memory if needed */
199 void (*close)(struct gfar_mii_info *mii_info);
200};
201
202struct phy_info *get_phy_info(struct gfar_mii_info *mii_info);
203int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
204void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
205void mii_clear_phy_interrupt(struct gfar_mii_info *mii_info);
206void mii_configure_phy_interrupt(struct gfar_mii_info *mii_info, u32 interrupts);
207
208struct dm9161_private {
209 struct timer_list timer;
210 int resetdone;
211};
212
213#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index de087cd609d9..896aa02000d7 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -1,6 +1,7 @@
1config MKISS 1config MKISS
2 tristate "Serial port KISS driver" 2 tristate "Serial port KISS driver"
3 depends on AX25 3 depends on AX25
4 select CRC16
4 ---help--- 5 ---help---
5 KISS is a protocol used for the exchange of data between a computer 6 KISS is a protocol used for the exchange of data between a computer
6 and a Terminal Node Controller (a small embedded system commonly 7 and a Terminal Node Controller (a small embedded system commonly
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 1756f0ed54cc..cb43a9d28774 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -144,7 +144,7 @@ static inline struct net_device *bpq_get_ax25_dev(struct net_device *dev)
144{ 144{
145 struct bpqdev *bpq; 145 struct bpqdev *bpq;
146 146
147 list_for_each_entry(bpq, &bpq_devices, bpq_list) { 147 list_for_each_entry_rcu(bpq, &bpq_devices, bpq_list) {
148 if (bpq->ethdev == dev) 148 if (bpq->ethdev == dev)
149 return bpq->axdev; 149 return bpq->axdev;
150 } 150 }
@@ -399,7 +399,7 @@ static void *bpq_seq_start(struct seq_file *seq, loff_t *pos)
399 if (*pos == 0) 399 if (*pos == 0)
400 return SEQ_START_TOKEN; 400 return SEQ_START_TOKEN;
401 401
402 list_for_each_entry(bpqdev, &bpq_devices, bpq_list) { 402 list_for_each_entry_rcu(bpqdev, &bpq_devices, bpq_list) {
403 if (i == *pos) 403 if (i == *pos)
404 return bpqdev; 404 return bpqdev;
405 } 405 }
@@ -418,7 +418,7 @@ static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
418 p = ((struct bpqdev *)v)->bpq_list.next; 418 p = ((struct bpqdev *)v)->bpq_list.next;
419 419
420 return (p == &bpq_devices) ? NULL 420 return (p == &bpq_devices) ? NULL
421 : list_entry(p, struct bpqdev, bpq_list); 421 : rcu_dereference(list_entry(p, struct bpqdev, bpq_list));
422} 422}
423 423
424static void bpq_seq_stop(struct seq_file *seq, void *v) 424static void bpq_seq_stop(struct seq_file *seq, void *v)
@@ -561,8 +561,6 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
561 if (!dev_is_ethdev(dev)) 561 if (!dev_is_ethdev(dev))
562 return NOTIFY_DONE; 562 return NOTIFY_DONE;
563 563
564 rcu_read_lock();
565
566 switch (event) { 564 switch (event) {
567 case NETDEV_UP: /* new ethernet device -> new BPQ interface */ 565 case NETDEV_UP: /* new ethernet device -> new BPQ interface */
568 if (bpq_get_ax25_dev(dev) == NULL) 566 if (bpq_get_ax25_dev(dev) == NULL)
@@ -581,7 +579,6 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
581 default: 579 default:
582 break; 580 break;
583 } 581 }
584 rcu_read_unlock();
585 582
586 return NOTIFY_DONE; 583 return NOTIFY_DONE;
587} 584}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index d9fe64b46f4b..3e9accf137e7 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -14,13 +14,14 @@
14 * 14 *
15 * Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl> 15 * Copyright (C) Hans Alblas PE1AYX <hans@esrac.ele.tue.nl>
16 * Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org> 16 * Copyright (C) 2004, 05 Ralf Baechle DL5RB <ralf@linux-mips.org>
17 * Copyright (C) 2004, 05 Thomas Osterried DL9SAU <thomas@x-berg.in-berlin.de>
17 */ 18 */
18
19#include <linux/config.h> 19#include <linux/config.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <asm/system.h> 21#include <asm/system.h>
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <asm/uaccess.h> 23#include <asm/uaccess.h>
24#include <linux/crc16.h>
24#include <linux/string.h> 25#include <linux/string.h>
25#include <linux/mm.h> 26#include <linux/mm.h>
26#include <linux/interrupt.h> 27#include <linux/interrupt.h>
@@ -39,11 +40,6 @@
39 40
40#include <net/ax25.h> 41#include <net/ax25.h>
41 42
42#ifdef CONFIG_INET
43#include <linux/ip.h>
44#include <linux/tcp.h>
45#endif
46
47#define AX_MTU 236 43#define AX_MTU 236
48 44
49/* SLIP/KISS protocol characters. */ 45/* SLIP/KISS protocol characters. */
@@ -80,9 +76,13 @@ struct mkiss {
80 76
81 int mode; 77 int mode;
82 int crcmode; /* MW: for FlexNet, SMACK etc. */ 78 int crcmode; /* MW: for FlexNet, SMACK etc. */
83#define CRC_MODE_NONE 0 79 int crcauto; /* CRC auto mode */
84#define CRC_MODE_FLEX 1 80
85#define CRC_MODE_SMACK 2 81#define CRC_MODE_NONE 0
82#define CRC_MODE_FLEX 1
83#define CRC_MODE_SMACK 2
84#define CRC_MODE_FLEX_TEST 3
85#define CRC_MODE_SMACK_TEST 4
86 86
87 atomic_t refcnt; 87 atomic_t refcnt;
88 struct semaphore dead_sem; 88 struct semaphore dead_sem;
@@ -151,6 +151,21 @@ static int check_crc_flex(unsigned char *cp, int size)
151 return 0; 151 return 0;
152} 152}
153 153
154static int check_crc_16(unsigned char *cp, int size)
155{
156 unsigned short crc = 0x0000;
157
158 if (size < 3)
159 return -1;
160
161 crc = crc16(0, cp, size);
162
163 if (crc != 0x0000)
164 return -1;
165
166 return 0;
167}
168
154/* 169/*
155 * Standard encapsulation 170 * Standard encapsulation
156 */ 171 */
@@ -237,19 +252,42 @@ static void ax_bump(struct mkiss *ax)
237 252
238 spin_lock_bh(&ax->buflock); 253 spin_lock_bh(&ax->buflock);
239 if (ax->rbuff[0] > 0x0f) { 254 if (ax->rbuff[0] > 0x0f) {
240 if (ax->rbuff[0] & 0x20) { 255 if (ax->rbuff[0] & 0x80) {
241 ax->crcmode = CRC_MODE_FLEX; 256 if (check_crc_16(ax->rbuff, ax->rcount) < 0) {
257 ax->stats.rx_errors++;
258 spin_unlock_bh(&ax->buflock);
259
260 return;
261 }
262 if (ax->crcmode != CRC_MODE_SMACK && ax->crcauto) {
263 printk(KERN_INFO
264 "mkiss: %s: Switchting to crc-smack\n",
265 ax->dev->name);
266 ax->crcmode = CRC_MODE_SMACK;
267 }
268 ax->rcount -= 2;
269 *ax->rbuff &= ~0x80;
270 } else if (ax->rbuff[0] & 0x20) {
242 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) { 271 if (check_crc_flex(ax->rbuff, ax->rcount) < 0) {
243 ax->stats.rx_errors++; 272 ax->stats.rx_errors++;
273 spin_unlock_bh(&ax->buflock);
244 return; 274 return;
245 } 275 }
276 if (ax->crcmode != CRC_MODE_FLEX && ax->crcauto) {
277 printk(KERN_INFO
278 "mkiss: %s: Switchting to crc-flexnet\n",
279 ax->dev->name);
280 ax->crcmode = CRC_MODE_FLEX;
281 }
246 ax->rcount -= 2; 282 ax->rcount -= 2;
247 /* dl9sau bugfix: the trailling two bytes flexnet crc 283
248 * will not be passed to the kernel. thus we have 284 /*
249 * to correct the kissparm signature, because it 285 * dl9sau bugfix: the trailling two bytes flexnet crc
250 * indicates a crc but there's none 286 * will not be passed to the kernel. thus we have to
287 * correct the kissparm signature, because it indicates
288 * a crc but there's none
251 */ 289 */
252 *ax->rbuff &= ~0x20; 290 *ax->rbuff &= ~0x20;
253 } 291 }
254 } 292 }
255 spin_unlock_bh(&ax->buflock); 293 spin_unlock_bh(&ax->buflock);
@@ -352,10 +390,8 @@ static void ax_changedmtu(struct mkiss *ax)
352 "MTU change cancelled.\n", 390 "MTU change cancelled.\n",
353 ax->dev->name); 391 ax->dev->name);
354 dev->mtu = ax->mtu; 392 dev->mtu = ax->mtu;
355 if (xbuff != NULL) 393 kfree(xbuff);
356 kfree(xbuff); 394 kfree(rbuff);
357 if (rbuff != NULL)
358 kfree(rbuff);
359 return; 395 return;
360 } 396 }
361 397
@@ -417,20 +453,69 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
417 p = icp; 453 p = icp;
418 454
419 spin_lock_bh(&ax->buflock); 455 spin_lock_bh(&ax->buflock);
420 switch (ax->crcmode) { 456 if ((*p & 0x0f) != 0) {
421 unsigned short crc; 457 /* Configuration Command (kissparms(1).
458 * Protocol spec says: never append CRC.
459 * This fixes a very old bug in the linux
460 * kiss driver. -- dl9sau */
461 switch (*p & 0xff) {
462 case 0x85:
463 /* command from userspace especially for us,
464 * not for delivery to the tnc */
465 if (len > 1) {
466 int cmd = (p[1] & 0xff);
467 switch(cmd) {
468 case 3:
469 ax->crcmode = CRC_MODE_SMACK;
470 break;
471 case 2:
472 ax->crcmode = CRC_MODE_FLEX;
473 break;
474 case 1:
475 ax->crcmode = CRC_MODE_NONE;
476 break;
477 case 0:
478 default:
479 ax->crcmode = CRC_MODE_SMACK_TEST;
480 cmd = 0;
481 }
482 ax->crcauto = (cmd ? 0 : 1);
483 printk(KERN_INFO "mkiss: %s: crc mode %s %d\n", ax->dev->name, (len) ? "set to" : "is", cmd);
484 }
485 spin_unlock_bh(&ax->buflock);
486 netif_start_queue(dev);
422 487
423 case CRC_MODE_FLEX: 488 return;
424 *p |= 0x20; 489 default:
425 crc = calc_crc_flex(p, len); 490 count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
426 count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2); 491 }
427 break; 492 } else {
493 unsigned short crc;
494 switch (ax->crcmode) {
495 case CRC_MODE_SMACK_TEST:
496 ax->crcmode = CRC_MODE_FLEX_TEST;
497 printk(KERN_INFO "mkiss: %s: Trying crc-smack\n", ax->dev->name);
498 // fall through
499 case CRC_MODE_SMACK:
500 *p |= 0x80;
501 crc = swab16(crc16(0, p, len));
502 count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
503 break;
504 case CRC_MODE_FLEX_TEST:
505 ax->crcmode = CRC_MODE_NONE;
506 printk(KERN_INFO "mkiss: %s: Trying crc-flexnet\n", ax->dev->name);
507 // fall through
508 case CRC_MODE_FLEX:
509 *p |= 0x20;
510 crc = calc_crc_flex(p, len);
511 count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
512 break;
513
514 default:
515 count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
516 }
517 }
428 518
429 default:
430 count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
431 break;
432 }
433
434 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); 519 set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
435 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); 520 actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
436 ax->stats.tx_packets++; 521 ax->stats.tx_packets++;
@@ -439,8 +524,6 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
439 ax->dev->trans_start = jiffies; 524 ax->dev->trans_start = jiffies;
440 ax->xleft = count - actual; 525 ax->xleft = count - actual;
441 ax->xhead = ax->xbuff + actual; 526 ax->xhead = ax->xbuff + actual;
442
443 spin_unlock_bh(&ax->buflock);
444} 527}
445 528
446/* Encapsulate an AX.25 packet and kick it into a TTY queue. */ 529/* Encapsulate an AX.25 packet and kick it into a TTY queue. */
@@ -622,7 +705,7 @@ static void ax_setup(struct net_device *dev)
622 * best way to fix this is to use a rwlock in the tty struct, but for now we 705 * best way to fix this is to use a rwlock in the tty struct, but for now we
623 * use a single global rwlock for all ttys in ppp line discipline. 706 * use a single global rwlock for all ttys in ppp line discipline.
624 */ 707 */
625static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED; 708static DEFINE_RWLOCK(disc_data_lock);
626 709
627static struct mkiss *mkiss_get(struct tty_struct *tty) 710static struct mkiss *mkiss_get(struct tty_struct *tty)
628{ 711{
@@ -643,6 +726,8 @@ static void mkiss_put(struct mkiss *ax)
643 up(&ax->dead_sem); 726 up(&ax->dead_sem);
644} 727}
645 728
729static int crc_force = 0; /* Can be overridden with insmod */
730
646static int mkiss_open(struct tty_struct *tty) 731static int mkiss_open(struct tty_struct *tty)
647{ 732{
648 struct net_device *dev; 733 struct net_device *dev;
@@ -682,6 +767,33 @@ static int mkiss_open(struct tty_struct *tty)
682 if (register_netdev(dev)) 767 if (register_netdev(dev))
683 goto out_free_buffers; 768 goto out_free_buffers;
684 769
770 /* after register_netdev() - because else printk smashes the kernel */
771 switch (crc_force) {
772 case 3:
773 ax->crcmode = CRC_MODE_SMACK;
774 printk(KERN_INFO "mkiss: %s: crc mode smack forced.\n",
775 ax->dev->name);
776 break;
777 case 2:
778 ax->crcmode = CRC_MODE_FLEX;
779 printk(KERN_INFO "mkiss: %s: crc mode flexnet forced.\n",
780 ax->dev->name);
781 break;
782 case 1:
783 ax->crcmode = CRC_MODE_NONE;
784 printk(KERN_INFO "mkiss: %s: crc mode disabled.\n",
785 ax->dev->name);
786 break;
787 case 0:
788 /* fall through */
789 default:
790 crc_force = 0;
791 printk(KERN_INFO "mkiss: %s: crc mode is auto.\n",
792 ax->dev->name);
793 ax->crcmode = CRC_MODE_SMACK_TEST;
794 }
795 ax->crcauto = (crc_force ? 0 : 1);
796
685 netif_start_queue(dev); 797 netif_start_queue(dev);
686 798
687 /* Done. We have linked the TTY line to a channel. */ 799 /* Done. We have linked the TTY line to a channel. */
@@ -765,7 +877,6 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
765 877
766 case SIOCSIFHWADDR: { 878 case SIOCSIFHWADDR: {
767 char addr[AX25_ADDR_LEN]; 879 char addr[AX25_ADDR_LEN];
768printk(KERN_INFO "In SIOCSIFHWADDR");
769 880
770 if (copy_from_user(&addr, 881 if (copy_from_user(&addr,
771 (void __user *) arg, AX25_ADDR_LEN)) { 882 (void __user *) arg, AX25_ADDR_LEN)) {
@@ -864,6 +975,7 @@ out:
864} 975}
865 976
866static struct tty_ldisc ax_ldisc = { 977static struct tty_ldisc ax_ldisc = {
978 .owner = THIS_MODULE,
867 .magic = TTY_LDISC_MAGIC, 979 .magic = TTY_LDISC_MAGIC,
868 .name = "mkiss", 980 .name = "mkiss",
869 .open = mkiss_open, 981 .open = mkiss_open,
@@ -904,6 +1016,8 @@ static void __exit mkiss_exit_driver(void)
904 1016
905MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>"); 1017MODULE_AUTHOR("Ralf Baechle DL5RB <ralf@linux-mips.org>");
906MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs"); 1018MODULE_DESCRIPTION("KISS driver for AX.25 over TTYs");
1019MODULE_PARM(crc_force, "i");
1020MODULE_PARM_DESC(crc_force, "crc [0 = auto | 1 = none | 2 = flexnet | 3 = smack]");
907MODULE_LICENSE("GPL"); 1021MODULE_LICENSE("GPL");
908MODULE_ALIAS_LDISC(N_AX25); 1022MODULE_ALIAS_LDISC(N_AX25);
909 1023
diff --git a/drivers/net/hamradio/mkiss.h b/drivers/net/hamradio/mkiss.h
deleted file mode 100644
index 4ab700478598..000000000000
--- a/drivers/net/hamradio/mkiss.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/****************************************************************************
2 * Defines for the Multi-KISS driver.
3 ****************************************************************************/
4
5#define AX25_MAXDEV 16 /* MAX number of AX25 channels;
6 This can be overridden with
7 insmod -oax25_maxdev=nnn */
8#define AX_MTU 236
9
10/* SLIP/KISS protocol characters. */
11#define END 0300 /* indicates end of frame */
12#define ESC 0333 /* indicates byte stuffing */
13#define ESC_END 0334 /* ESC ESC_END means END 'data' */
14#define ESC_ESC 0335 /* ESC ESC_ESC means ESC 'data' */
15
16struct ax_disp {
17 int magic;
18
19 /* Various fields. */
20 struct tty_struct *tty; /* ptr to TTY structure */
21 struct net_device *dev; /* easy for intr handling */
22
23 /* These are pointers to the malloc()ed frame buffers. */
24 unsigned char *rbuff; /* receiver buffer */
25 int rcount; /* received chars counter */
26 unsigned char *xbuff; /* transmitter buffer */
27 unsigned char *xhead; /* pointer to next byte to XMIT */
28 int xleft; /* bytes left in XMIT queue */
29
30 /* SLIP interface statistics. */
31 unsigned long rx_packets; /* inbound frames counter */
32 unsigned long tx_packets; /* outbound frames counter */
33 unsigned long rx_bytes; /* inbound bytes counter */
34 unsigned long tx_bytes; /* outbound bytes counter */
35 unsigned long rx_errors; /* Parity, etc. errors */
36 unsigned long tx_errors; /* Planned stuff */
37 unsigned long rx_dropped; /* No memory for skb */
38 unsigned long tx_dropped; /* When MTU change */
39 unsigned long rx_over_errors; /* Frame bigger then SLIP buf. */
40
41 /* Detailed SLIP statistics. */
42 int mtu; /* Our mtu (to spot changes!) */
43 int buffsize; /* Max buffers sizes */
44
45
46 unsigned long flags; /* Flag values/ mode etc */
47 /* long req'd: used by set_bit --RR */
48#define AXF_INUSE 0 /* Channel in use */
49#define AXF_ESCAPE 1 /* ESC received */
50#define AXF_ERROR 2 /* Parity, etc. error */
51#define AXF_KEEPTEST 3 /* Keepalive test flag */
52#define AXF_OUTWAIT 4 /* is outpacket was flag */
53
54 int mode;
55 int crcmode; /* MW: for FlexNet, SMACK etc. */
56#define CRC_MODE_NONE 0
57#define CRC_MODE_FLEX 1
58#define CRC_MODE_SMACK 2
59 spinlock_t buflock; /* lock for rbuf and xbuf */
60};
61
62#define AX25_MAGIC 0x5316
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index cf0ac6fda1a1..b71fab6e34f4 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -2517,10 +2517,8 @@ static int hp100_down_vg_link(struct net_device *dev)
2517 do { 2517 do {
2518 if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) 2518 if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
2519 break; 2519 break;
2520 if (!in_interrupt()) { 2520 if (!in_interrupt())
2521 set_current_state(TASK_INTERRUPTIBLE); 2521 schedule_timeout_interruptible(1);
2522 schedule_timeout(1);
2523 }
2524 } while (time_after(time, jiffies)); 2522 } while (time_after(time, jiffies));
2525 2523
2526 if (time_after_eq(jiffies, time)) /* no signal->no logout */ 2524 if (time_after_eq(jiffies, time)) /* no signal->no logout */
@@ -2536,10 +2534,8 @@ static int hp100_down_vg_link(struct net_device *dev)
2536 do { 2534 do {
2537 if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) 2535 if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
2538 break; 2536 break;
2539 if (!in_interrupt()) { 2537 if (!in_interrupt())
2540 set_current_state(TASK_INTERRUPTIBLE); 2538 schedule_timeout_interruptible(1);
2541 schedule_timeout(1);
2542 }
2543 } while (time_after(time, jiffies)); 2539 } while (time_after(time, jiffies));
2544 2540
2545#ifdef HP100_DEBUG 2541#ifdef HP100_DEBUG
@@ -2577,10 +2573,8 @@ static int hp100_down_vg_link(struct net_device *dev)
2577 do { 2573 do {
2578 if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST)) 2574 if (!(hp100_inb(MAC_CFG_4) & HP100_MAC_SEL_ST))
2579 break; 2575 break;
2580 if (!in_interrupt()) { 2576 if (!in_interrupt())
2581 set_current_state(TASK_INTERRUPTIBLE); 2577 schedule_timeout_interruptible(1);
2582 schedule_timeout(1);
2583 }
2584 } while (time_after(time, jiffies)); 2578 } while (time_after(time, jiffies));
2585 2579
2586 hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */ 2580 hp100_orb(HP100_AUTO_MODE, MAC_CFG_3); /* Autosel back on */
@@ -2591,10 +2585,8 @@ static int hp100_down_vg_link(struct net_device *dev)
2591 do { 2585 do {
2592 if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0) 2586 if ((hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) == 0)
2593 break; 2587 break;
2594 if (!in_interrupt()) { 2588 if (!in_interrupt())
2595 set_current_state(TASK_INTERRUPTIBLE); 2589 schedule_timeout_interruptible(1);
2596 schedule_timeout(1);
2597 }
2598 } while (time_after(time, jiffies)); 2590 } while (time_after(time, jiffies));
2599 2591
2600 if (time_before_eq(time, jiffies)) { 2592 if (time_before_eq(time, jiffies)) {
@@ -2606,10 +2598,8 @@ static int hp100_down_vg_link(struct net_device *dev)
2606 2598
2607 time = jiffies + (2 * HZ); /* This seems to take a while.... */ 2599 time = jiffies + (2 * HZ); /* This seems to take a while.... */
2608 do { 2600 do {
2609 if (!in_interrupt()) { 2601 if (!in_interrupt())
2610 set_current_state(TASK_INTERRUPTIBLE); 2602 schedule_timeout_interruptible(1);
2611 schedule_timeout(1);
2612 }
2613 } while (time_after(time, jiffies)); 2603 } while (time_after(time, jiffies));
2614 2604
2615 return 0; 2605 return 0;
@@ -2659,10 +2649,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
2659 do { 2649 do {
2660 if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) 2650 if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
2661 break; 2651 break;
2662 if (!in_interrupt()) { 2652 if (!in_interrupt())
2663 set_current_state(TASK_INTERRUPTIBLE); 2653 schedule_timeout_interruptible(1);
2664 schedule_timeout(1);
2665 }
2666 } while (time_after(time, jiffies)); 2654 } while (time_after(time, jiffies));
2667 2655
2668 /* Start an addressed training and optionally request promiscuous port */ 2656 /* Start an addressed training and optionally request promiscuous port */
@@ -2697,10 +2685,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
2697 do { 2685 do {
2698 if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST) 2686 if (hp100_inb(VG_LAN_CFG_1) & HP100_LINK_CABLE_ST)
2699 break; 2687 break;
2700 if (!in_interrupt()) { 2688 if (!in_interrupt())
2701 set_current_state(TASK_INTERRUPTIBLE); 2689 schedule_timeout_interruptible(1);
2702 schedule_timeout(1);
2703 }
2704 } while (time_before(jiffies, time)); 2690 } while (time_before(jiffies, time));
2705 2691
2706 if (time_after_eq(jiffies, time)) { 2692 if (time_after_eq(jiffies, time)) {
@@ -2723,10 +2709,8 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
2723#endif 2709#endif
2724 break; 2710 break;
2725 } 2711 }
2726 if (!in_interrupt()) { 2712 if (!in_interrupt())
2727 set_current_state(TASK_INTERRUPTIBLE); 2713 schedule_timeout_interruptible(1);
2728 schedule_timeout(1);
2729 }
2730 } while (time_after(time, jiffies)); 2714 } while (time_after(time, jiffies));
2731 } 2715 }
2732 2716
diff --git a/drivers/net/ibm_emac/Makefile b/drivers/net/ibm_emac/Makefile
index 7f583a333c24..f98ddf0e807a 100644
--- a/drivers/net/ibm_emac/Makefile
+++ b/drivers/net/ibm_emac/Makefile
@@ -1,12 +1,11 @@
1# 1#
2# Makefile for the IBM PPC4xx EMAC controllers 2# Makefile for the PowerPC 4xx on-chip ethernet driver
3# 3#
4 4
5obj-$(CONFIG_IBM_EMAC) += ibm_emac.o 5obj-$(CONFIG_IBM_EMAC) += ibm_emac.o
6 6
7ibm_emac-objs := ibm_emac_mal.o ibm_emac_core.o ibm_emac_phy.o 7ibm_emac-objs := ibm_emac_mal.o ibm_emac_core.o ibm_emac_phy.o
8 8ibm_emac-$(CONFIG_IBM_EMAC_ZMII) += ibm_emac_zmii.o
9# Only need this if you want to see additional debug messages 9ibm_emac-$(CONFIG_IBM_EMAC_RGMII) += ibm_emac_rgmii.o
10ifeq ($(CONFIG_IBM_EMAC_ERRMSG), y) 10ibm_emac-$(CONFIG_IBM_EMAC_TAH) += ibm_emac_tah.o
11ibm_emac-objs += ibm_emac_debug.o 11ibm_emac-$(CONFIG_IBM_EMAC_DEBUG) += ibm_emac_debug.o
12endif
diff --git a/drivers/net/ibm_emac/ibm_emac.h b/drivers/net/ibm_emac/ibm_emac.h
index 15d5a0e82862..28c476f28c20 100644
--- a/drivers/net/ibm_emac/ibm_emac.h
+++ b/drivers/net/ibm_emac/ibm_emac.h
@@ -1,110 +1,142 @@
1/* 1/*
2 * ibm_emac.h 2 * drivers/net/ibm_emac/ibm_emac.h
3 * 3 *
4 * Register definitions for PowerPC 4xx on-chip ethernet contoller
4 * 5 *
5 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies.
6 * June, 2002 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
7 * 8 *
8 * Copyright 2002 MontaVista Softare Inc. 9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * Armin Kuster <akuster@mvista.com>
12 * Copyright 2002-2004 MontaVista Software Inc.
9 * 13 *
10 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 16 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version. 17 * option) any later version.
18 *
14 */ 19 */
20#ifndef __IBM_EMAC_H_
21#define __IBM_EMAC_H_
22
23#include <linux/config.h>
24#include <linux/types.h>
25
26/* This is a simple check to prevent use of this driver on non-tested SoCs */
27#if !defined(CONFIG_405GP) && !defined(CONFIG_405GPR) && !defined(CONFIG_405EP) && \
28 !defined(CONFIG_440GP) && !defined(CONFIG_440GX) && !defined(CONFIG_440SP) && \
29 !defined(CONFIG_440EP) && !defined(CONFIG_NP405H)
30#error "Unknown SoC. Please, check chip user manual and make sure EMAC defines are OK"
31#endif
32
33/* EMAC registers Write Access rules */
34struct emac_regs {
35 u32 mr0; /* special */
36 u32 mr1; /* Reset */
37 u32 tmr0; /* special */
38 u32 tmr1; /* special */
39 u32 rmr; /* Reset */
40 u32 isr; /* Always */
41 u32 iser; /* Reset */
42 u32 iahr; /* Reset, R, T */
43 u32 ialr; /* Reset, R, T */
44 u32 vtpid; /* Reset, R, T */
45 u32 vtci; /* Reset, R, T */
46 u32 ptr; /* Reset, T */
47 u32 iaht1; /* Reset, R */
48 u32 iaht2; /* Reset, R */
49 u32 iaht3; /* Reset, R */
50 u32 iaht4; /* Reset, R */
51 u32 gaht1; /* Reset, R */
52 u32 gaht2; /* Reset, R */
53 u32 gaht3; /* Reset, R */
54 u32 gaht4; /* Reset, R */
55 u32 lsah;
56 u32 lsal;
57 u32 ipgvr; /* Reset, T */
58 u32 stacr; /* special */
59 u32 trtr; /* special */
60 u32 rwmr; /* Reset */
61 u32 octx;
62 u32 ocrx;
63 u32 ipcr;
64};
65
66#if !defined(CONFIG_IBM_EMAC4)
67#define EMAC_ETHTOOL_REGS_VER 0
68#define EMAC_ETHTOOL_REGS_SIZE (sizeof(struct emac_regs) - sizeof(u32))
69#else
70#define EMAC_ETHTOOL_REGS_VER 1
71#define EMAC_ETHTOOL_REGS_SIZE sizeof(struct emac_regs)
72#endif
15 73
16#ifndef _IBM_EMAC_H_ 74/* EMACx_MR0 */
17#define _IBM_EMAC_H_ 75#define EMAC_MR0_RXI 0x80000000
18/* General defines needed for the driver */ 76#define EMAC_MR0_TXI 0x40000000
77#define EMAC_MR0_SRST 0x20000000
78#define EMAC_MR0_TXE 0x10000000
79#define EMAC_MR0_RXE 0x08000000
80#define EMAC_MR0_WKE 0x04000000
19 81
20/* Emac */ 82/* EMACx_MR1 */
21typedef struct emac_regs { 83#define EMAC_MR1_FDE 0x80000000
22 u32 em0mr0; 84#define EMAC_MR1_ILE 0x40000000
23 u32 em0mr1; 85#define EMAC_MR1_VLE 0x20000000
24 u32 em0tmr0; 86#define EMAC_MR1_EIFC 0x10000000
25 u32 em0tmr1; 87#define EMAC_MR1_APP 0x08000000
26 u32 em0rmr; 88#define EMAC_MR1_IST 0x01000000
27 u32 em0isr;
28 u32 em0iser;
29 u32 em0iahr;
30 u32 em0ialr;
31 u32 em0vtpid;
32 u32 em0vtci;
33 u32 em0ptr;
34 u32 em0iaht1;
35 u32 em0iaht2;
36 u32 em0iaht3;
37 u32 em0iaht4;
38 u32 em0gaht1;
39 u32 em0gaht2;
40 u32 em0gaht3;
41 u32 em0gaht4;
42 u32 em0lsah;
43 u32 em0lsal;
44 u32 em0ipgvr;
45 u32 em0stacr;
46 u32 em0trtr;
47 u32 em0rwmr;
48} emac_t;
49 89
50/* MODE REG 0 */ 90#define EMAC_MR1_MF_MASK 0x00c00000
51#define EMAC_M0_RXI 0x80000000 91#define EMAC_MR1_MF_10 0x00000000
52#define EMAC_M0_TXI 0x40000000 92#define EMAC_MR1_MF_100 0x00400000
53#define EMAC_M0_SRST 0x20000000 93#if !defined(CONFIG_IBM_EMAC4)
54#define EMAC_M0_TXE 0x10000000 94#define EMAC_MR1_MF_1000 0x00000000
55#define EMAC_M0_RXE 0x08000000 95#define EMAC_MR1_MF_1000GPCS 0x00000000
56#define EMAC_M0_WKE 0x04000000 96#define EMAC_MR1_MF_IPPA(id) 0x00000000
97#else
98#define EMAC_MR1_MF_1000 0x00800000
99#define EMAC_MR1_MF_1000GPCS 0x00c00000
100#define EMAC_MR1_MF_IPPA(id) (((id) & 0x1f) << 6)
101#endif
57 102
58/* MODE Reg 1 */ 103#define EMAC_TX_FIFO_SIZE 2048
59#define EMAC_M1_FDE 0x80000000
60#define EMAC_M1_ILE 0x40000000
61#define EMAC_M1_VLE 0x20000000
62#define EMAC_M1_EIFC 0x10000000
63#define EMAC_M1_APP 0x08000000
64#define EMAC_M1_AEMI 0x02000000
65#define EMAC_M1_IST 0x01000000
66#define EMAC_M1_MF_1000GPCS 0x00c00000 /* Internal GPCS */
67#define EMAC_M1_MF_1000MBPS 0x00800000 /* External GPCS */
68#define EMAC_M1_MF_100MBPS 0x00400000
69#define EMAC_M1_RFS_16K 0x00280000 /* 000 for 512 byte */
70#define EMAC_M1_TR 0x00008000
71#ifdef CONFIG_IBM_EMAC4
72#define EMAC_M1_RFS_8K 0x00200000
73#define EMAC_M1_RFS_4K 0x00180000
74#define EMAC_M1_RFS_2K 0x00100000
75#define EMAC_M1_RFS_1K 0x00080000
76#define EMAC_M1_TX_FIFO_16K 0x00050000 /* 0's for 512 byte */
77#define EMAC_M1_TX_FIFO_8K 0x00040000
78#define EMAC_M1_TX_FIFO_4K 0x00030000
79#define EMAC_M1_TX_FIFO_2K 0x00020000
80#define EMAC_M1_TX_FIFO_1K 0x00010000
81#define EMAC_M1_TX_TR 0x00008000
82#define EMAC_M1_TX_MWSW 0x00001000 /* 0 wait for status */
83#define EMAC_M1_JUMBO_ENABLE 0x00000800 /* Upt to 9Kr status */
84#define EMAC_M1_OPB_CLK_66 0x00000008 /* 66Mhz */
85#define EMAC_M1_OPB_CLK_83 0x00000010 /* 83Mhz */
86#define EMAC_M1_OPB_CLK_100 0x00000018 /* 100Mhz */
87#define EMAC_M1_OPB_CLK_100P 0x00000020 /* 100Mhz+ */
88#else /* CONFIG_IBM_EMAC4 */
89#define EMAC_M1_RFS_4K 0x00300000 /* ~4k for 512 byte */
90#define EMAC_M1_RFS_2K 0x00200000
91#define EMAC_M1_RFS_1K 0x00100000
92#define EMAC_M1_TX_FIFO_2K 0x00080000 /* 0's for 512 byte */
93#define EMAC_M1_TX_FIFO_1K 0x00040000
94#define EMAC_M1_TR0_DEPEND 0x00010000 /* 0'x for single packet */
95#define EMAC_M1_TR1_DEPEND 0x00004000
96#define EMAC_M1_TR1_MULTI 0x00002000
97#define EMAC_M1_JUMBO_ENABLE 0x00001000
98#endif /* CONFIG_IBM_EMAC4 */
99#define EMAC_M1_BASE (EMAC_M1_TX_FIFO_2K | \
100 EMAC_M1_APP | \
101 EMAC_M1_TR | EMAC_M1_VLE)
102 104
103/* Transmit Mode Register 0 */ 105#if !defined(CONFIG_IBM_EMAC4)
104#define EMAC_TMR0_GNP0 0x80000000 106#define EMAC_MR1_RFS_4K 0x00300000
105#define EMAC_TMR0_GNP1 0x40000000 107#define EMAC_MR1_RFS_16K 0x00000000
106#define EMAC_TMR0_GNPD 0x20000000 108#define EMAC_RX_FIFO_SIZE(gige) 4096
107#define EMAC_TMR0_FC 0x10000000 109#define EMAC_MR1_TFS_2K 0x00080000
110#define EMAC_MR1_TR0_MULT 0x00008000
111#define EMAC_MR1_JPSM 0x00000000
112#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
113#else
114#define EMAC_MR1_RFS_4K 0x00180000
115#define EMAC_MR1_RFS_16K 0x00280000
116#define EMAC_RX_FIFO_SIZE(gige) ((gige) ? 16384 : 4096)
117#define EMAC_MR1_TFS_2K 0x00020000
118#define EMAC_MR1_TR 0x00008000
119#define EMAC_MR1_MWSW_001 0x00001000
120#define EMAC_MR1_JPSM 0x00000800
121#define EMAC_MR1_OBCI_MASK 0x00000038
122#define EMAC_MR1_OBCI_50 0x00000000
123#define EMAC_MR1_OBCI_66 0x00000008
124#define EMAC_MR1_OBCI_83 0x00000010
125#define EMAC_MR1_OBCI_100 0x00000018
126#define EMAC_MR1_OBCI_100P 0x00000020
127#define EMAC_MR1_OBCI(freq) ((freq) <= 50 ? EMAC_MR1_OBCI_50 : \
128 (freq) <= 66 ? EMAC_MR1_OBCI_66 : \
129 (freq) <= 83 ? EMAC_MR1_OBCI_83 : \
130 (freq) <= 100 ? EMAC_MR1_OBCI_100 : EMAC_MR1_OBCI_100P)
131#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR | \
132 EMAC_MR1_MWSW_001 | EMAC_MR1_OBCI(opb))
133#endif
134
135/* EMACx_TMR0 */
136#define EMAC_TMR0_GNP 0x80000000
137#if !defined(CONFIG_IBM_EMAC4)
138#define EMAC_TMR0_DEFAULT 0x00000000
139#else
108#define EMAC_TMR0_TFAE_2_32 0x00000001 140#define EMAC_TMR0_TFAE_2_32 0x00000001
109#define EMAC_TMR0_TFAE_4_64 0x00000002 141#define EMAC_TMR0_TFAE_4_64 0x00000002
110#define EMAC_TMR0_TFAE_8_128 0x00000003 142#define EMAC_TMR0_TFAE_8_128 0x00000003
@@ -112,14 +144,36 @@ typedef struct emac_regs {
112#define EMAC_TMR0_TFAE_32_512 0x00000005 144#define EMAC_TMR0_TFAE_32_512 0x00000005
113#define EMAC_TMR0_TFAE_64_1024 0x00000006 145#define EMAC_TMR0_TFAE_64_1024 0x00000006
114#define EMAC_TMR0_TFAE_128_2048 0x00000007 146#define EMAC_TMR0_TFAE_128_2048 0x00000007
147#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_2_32
148#endif
149#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP | EMAC_TMR0_DEFAULT)
150
151/* EMACx_TMR1 */
152
153/* IBM manuals are not very clear here.
154 * This is my interpretation of how things are. --ebs
155 */
156#if defined(CONFIG_40x)
157#define EMAC_FIFO_ENTRY_SIZE 8
158#define EMAC_MAL_BURST_SIZE (16 * 4)
159#else
160#define EMAC_FIFO_ENTRY_SIZE 16
161#define EMAC_MAL_BURST_SIZE (64 * 4)
162#endif
163
164#if !defined(CONFIG_IBM_EMAC4)
165#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0xff) << 16))
166#else
167#define EMAC_TMR1(l,h) (((l) << 27) | (((h) & 0x3ff) << 14))
168#endif
115 169
116/* Receive Mode Register */ 170/* EMACx_RMR */
117#define EMAC_RMR_SP 0x80000000 171#define EMAC_RMR_SP 0x80000000
118#define EMAC_RMR_SFCS 0x40000000 172#define EMAC_RMR_SFCS 0x40000000
119#define EMAC_RMR_ARRP 0x20000000 173#define EMAC_RMR_RRP 0x20000000
120#define EMAC_RMR_ARP 0x10000000 174#define EMAC_RMR_RFP 0x10000000
121#define EMAC_RMR_AROP 0x08000000 175#define EMAC_RMR_ROP 0x08000000
122#define EMAC_RMR_ARPI 0x04000000 176#define EMAC_RMR_RPIR 0x04000000
123#define EMAC_RMR_PPP 0x02000000 177#define EMAC_RMR_PPP 0x02000000
124#define EMAC_RMR_PME 0x01000000 178#define EMAC_RMR_PME 0x01000000
125#define EMAC_RMR_PMME 0x00800000 179#define EMAC_RMR_PMME 0x00800000
@@ -127,6 +181,9 @@ typedef struct emac_regs {
127#define EMAC_RMR_MIAE 0x00200000 181#define EMAC_RMR_MIAE 0x00200000
128#define EMAC_RMR_BAE 0x00100000 182#define EMAC_RMR_BAE 0x00100000
129#define EMAC_RMR_MAE 0x00080000 183#define EMAC_RMR_MAE 0x00080000
184#if !defined(CONFIG_IBM_EMAC4)
185#define EMAC_RMR_BASE 0x00000000
186#else
130#define EMAC_RMR_RFAF_2_32 0x00000001 187#define EMAC_RMR_RFAF_2_32 0x00000001
131#define EMAC_RMR_RFAF_4_64 0x00000002 188#define EMAC_RMR_RFAF_4_64 0x00000002
132#define EMAC_RMR_RFAF_8_128 0x00000003 189#define EMAC_RMR_RFAF_8_128 0x00000003
@@ -134,9 +191,21 @@ typedef struct emac_regs {
134#define EMAC_RMR_RFAF_32_512 0x00000005 191#define EMAC_RMR_RFAF_32_512 0x00000005
135#define EMAC_RMR_RFAF_64_1024 0x00000006 192#define EMAC_RMR_RFAF_64_1024 0x00000006
136#define EMAC_RMR_RFAF_128_2048 0x00000007 193#define EMAC_RMR_RFAF_128_2048 0x00000007
137#define EMAC_RMR_BASE (EMAC_RMR_IAE | EMAC_RMR_BAE) 194#define EMAC_RMR_BASE EMAC_RMR_RFAF_128_2048
195#endif
138 196
139/* Interrupt Status & enable Regs */ 197/* EMACx_ISR & EMACx_ISER */
198#if !defined(CONFIG_IBM_EMAC4)
199#define EMAC_ISR_TXPE 0x00000000
200#define EMAC_ISR_RXPE 0x00000000
201#define EMAC_ISR_TXUE 0x00000000
202#define EMAC_ISR_RXOE 0x00000000
203#else
204#define EMAC_ISR_TXPE 0x20000000
205#define EMAC_ISR_RXPE 0x10000000
206#define EMAC_ISR_TXUE 0x08000000
207#define EMAC_ISR_RXOE 0x04000000
208#endif
140#define EMAC_ISR_OVR 0x02000000 209#define EMAC_ISR_OVR 0x02000000
141#define EMAC_ISR_PP 0x01000000 210#define EMAC_ISR_PP 0x01000000
142#define EMAC_ISR_BP 0x00800000 211#define EMAC_ISR_BP 0x00800000
@@ -147,53 +216,62 @@ typedef struct emac_regs {
147#define EMAC_ISR_PTLE 0x00040000 216#define EMAC_ISR_PTLE 0x00040000
148#define EMAC_ISR_ORE 0x00020000 217#define EMAC_ISR_ORE 0x00020000
149#define EMAC_ISR_IRE 0x00010000 218#define EMAC_ISR_IRE 0x00010000
150#define EMAC_ISR_DBDM 0x00000200 219#define EMAC_ISR_SQE 0x00000080
151#define EMAC_ISR_DB0 0x00000100 220#define EMAC_ISR_TE 0x00000040
152#define EMAC_ISR_SE0 0x00000080
153#define EMAC_ISR_TE0 0x00000040
154#define EMAC_ISR_DB1 0x00000020
155#define EMAC_ISR_SE1 0x00000010
156#define EMAC_ISR_TE1 0x00000008
157#define EMAC_ISR_MOS 0x00000002 221#define EMAC_ISR_MOS 0x00000002
158#define EMAC_ISR_MOF 0x00000001 222#define EMAC_ISR_MOF 0x00000001
159 223
160/* STA CONTROL REG */ 224/* EMACx_STACR */
225#define EMAC_STACR_PHYD_MASK 0xffff
226#define EMAC_STACR_PHYD_SHIFT 16
161#define EMAC_STACR_OC 0x00008000 227#define EMAC_STACR_OC 0x00008000
162#define EMAC_STACR_PHYE 0x00004000 228#define EMAC_STACR_PHYE 0x00004000
163#define EMAC_STACR_WRITE 0x00002000 229#define EMAC_STACR_STAC_MASK 0x00003000
164#define EMAC_STACR_READ 0x00001000 230#define EMAC_STACR_STAC_READ 0x00001000
165#define EMAC_STACR_CLK_83MHZ 0x00000800 /* 0's for 50Mhz */ 231#define EMAC_STACR_STAC_WRITE 0x00002000
166#define EMAC_STACR_CLK_66MHZ 0x00000400 232#if !defined(CONFIG_IBM_EMAC4)
167#define EMAC_STACR_CLK_100MHZ 0x00000C00 233#define EMAC_STACR_OPBC_MASK 0x00000C00
234#define EMAC_STACR_OPBC_50 0x00000000
235#define EMAC_STACR_OPBC_66 0x00000400
236#define EMAC_STACR_OPBC_83 0x00000800
237#define EMAC_STACR_OPBC_100 0x00000C00
238#define EMAC_STACR_OPBC(freq) ((freq) <= 50 ? EMAC_STACR_OPBC_50 : \
239 (freq) <= 66 ? EMAC_STACR_OPBC_66 : \
240 (freq) <= 83 ? EMAC_STACR_OPBC_83 : EMAC_STACR_OPBC_100)
241#define EMAC_STACR_BASE(opb) EMAC_STACR_OPBC(opb)
242#else
243#define EMAC_STACR_BASE(opb) 0x00000000
244#endif
245#define EMAC_STACR_PCDA_MASK 0x1f
246#define EMAC_STACR_PCDA_SHIFT 5
247#define EMAC_STACR_PRA_MASK 0x1f
248
249/* EMACx_TRTR */
250#if !defined(CONFIG_IBM_EMAC4)
251#define EMAC_TRTR_SHIFT 27
252#else
253#define EMAC_TRTR_SHIFT 24
254#endif
255#define EMAC_TRTR(size) ((((size) >> 6) - 1) << EMAC_TRTR_SHIFT)
168 256
169/* Transmit Request Threshold Register */ 257/* EMACx_RWMR */
170#define EMAC_TRTR_1600 0x18000000 /* 0's for 64 Bytes */ 258#if !defined(CONFIG_IBM_EMAC4)
171#define EMAC_TRTR_1024 0x0f000000 259#define EMAC_RWMR(l,h) (((l) << 23) | ( ((h) & 0x1ff) << 7))
172#define EMAC_TRTR_512 0x07000000 260#else
173#define EMAC_TRTR_256 0x03000000 261#define EMAC_RWMR(l,h) (((l) << 22) | ( ((h) & 0x3ff) << 6))
174#define EMAC_TRTR_192 0x10000000 262#endif
175#define EMAC_TRTR_128 0x01000000
176 263
264/* EMAC specific TX descriptor control fields (write access) */
177#define EMAC_TX_CTRL_GFCS 0x0200 265#define EMAC_TX_CTRL_GFCS 0x0200
178#define EMAC_TX_CTRL_GP 0x0100 266#define EMAC_TX_CTRL_GP 0x0100
179#define EMAC_TX_CTRL_ISA 0x0080 267#define EMAC_TX_CTRL_ISA 0x0080
180#define EMAC_TX_CTRL_RSA 0x0040 268#define EMAC_TX_CTRL_RSA 0x0040
181#define EMAC_TX_CTRL_IVT 0x0020 269#define EMAC_TX_CTRL_IVT 0x0020
182#define EMAC_TX_CTRL_RVT 0x0010 270#define EMAC_TX_CTRL_RVT 0x0010
183#define EMAC_TX_CTRL_TAH_CSUM 0x000e /* TAH only */ 271#define EMAC_TX_CTRL_TAH_CSUM 0x000e
184#define EMAC_TX_CTRL_TAH_SEG4 0x000a /* TAH only */
185#define EMAC_TX_CTRL_TAH_SEG3 0x0008 /* TAH only */
186#define EMAC_TX_CTRL_TAH_SEG2 0x0006 /* TAH only */
187#define EMAC_TX_CTRL_TAH_SEG1 0x0004 /* TAH only */
188#define EMAC_TX_CTRL_TAH_SEG0 0x0002 /* TAH only */
189#define EMAC_TX_CTRL_TAH_DIS 0x0000 /* TAH only */
190 272
191#define EMAC_TX_CTRL_DFLT ( \ 273/* EMAC specific TX descriptor status fields (read access) */
192 MAL_TX_CTRL_INTR | EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP )
193
194/* madmal transmit status / Control bits */
195#define EMAC_TX_ST_BFCS 0x0200 274#define EMAC_TX_ST_BFCS 0x0200
196#define EMAC_TX_ST_BPP 0x0100
197#define EMAC_TX_ST_LCS 0x0080 275#define EMAC_TX_ST_LCS 0x0080
198#define EMAC_TX_ST_ED 0x0040 276#define EMAC_TX_ST_ED 0x0040
199#define EMAC_TX_ST_EC 0x0020 277#define EMAC_TX_ST_EC 0x0020
@@ -202,8 +280,16 @@ typedef struct emac_regs {
202#define EMAC_TX_ST_SC 0x0004 280#define EMAC_TX_ST_SC 0x0004
203#define EMAC_TX_ST_UR 0x0002 281#define EMAC_TX_ST_UR 0x0002
204#define EMAC_TX_ST_SQE 0x0001 282#define EMAC_TX_ST_SQE 0x0001
283#if !defined(CONFIG_IBM_EMAC_TAH)
284#define EMAC_IS_BAD_TX(v) ((v) & (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
285 EMAC_TX_ST_EC | EMAC_TX_ST_LC | \
286 EMAC_TX_ST_MC | EMAC_TX_ST_UR))
287#else
288#define EMAC_IS_BAD_TX(v) ((v) & (EMAC_TX_ST_LCS | EMAC_TX_ST_ED | \
289 EMAC_TX_ST_EC | EMAC_TX_ST_LC))
290#endif
205 291
206/* madmal receive status / Control bits */ 292/* EMAC specific RX descriptor status fields (read access) */
207#define EMAC_RX_ST_OE 0x0200 293#define EMAC_RX_ST_OE 0x0200
208#define EMAC_RX_ST_PP 0x0100 294#define EMAC_RX_ST_PP 0x0100
209#define EMAC_RX_ST_BP 0x0080 295#define EMAC_RX_ST_BP 0x0080
@@ -214,54 +300,10 @@ typedef struct emac_regs {
214#define EMAC_RX_ST_PTL 0x0004 300#define EMAC_RX_ST_PTL 0x0004
215#define EMAC_RX_ST_ORE 0x0002 301#define EMAC_RX_ST_ORE 0x0002
216#define EMAC_RX_ST_IRE 0x0001 302#define EMAC_RX_ST_IRE 0x0001
217#define EMAC_BAD_RX_PACKET 0x02ff 303#define EMAC_RX_TAH_BAD_CSUM 0x0003
218#define EMAC_CSUM_VER_ERROR 0x0003 304#define EMAC_BAD_RX_MASK (EMAC_RX_ST_OE | EMAC_RX_ST_BP | \
219 305 EMAC_RX_ST_RP | EMAC_RX_ST_SE | \
220/* identify a bad rx packet dependent on emac features */ 306 EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \
221#ifdef CONFIG_IBM_EMAC4 307 EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \
222#define EMAC_IS_BAD_RX_PACKET(desc) \ 308 EMAC_RX_ST_IRE )
223 (((desc & (EMAC_BAD_RX_PACKET & ~EMAC_CSUM_VER_ERROR)) || \ 309#endif /* __IBM_EMAC_H_ */
224 ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_ORE) || \
225 ((desc & EMAC_CSUM_VER_ERROR) == EMAC_RX_ST_IRE)))
226#else
227#define EMAC_IS_BAD_RX_PACKET(desc) \
228 (desc & EMAC_BAD_RX_PACKET)
229#endif
230
231/* SoC implementation specific EMAC register defaults */
232#if defined(CONFIG_440GP)
233#define EMAC_RWMR_DEFAULT 0x80009000
234#define EMAC_TMR0_DEFAULT 0x00000000
235#define EMAC_TMR1_DEFAULT 0xf8640000
236#elif defined(CONFIG_440GX)
237#define EMAC_RWMR_DEFAULT 0x1000a200
238#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_2_32
239#define EMAC_TMR1_DEFAULT 0xa00f0000
240#elif defined(CONFIG_440SP)
241#define EMAC_RWMR_DEFAULT 0x08002000
242#define EMAC_TMR0_DEFAULT EMAC_TMR0_TFAE_128_2048
243#define EMAC_TMR1_DEFAULT 0xf8200000
244#else
245#define EMAC_RWMR_DEFAULT 0x0f002000
246#define EMAC_TMR0_DEFAULT 0x00000000
247#define EMAC_TMR1_DEFAULT 0x380f0000
248#endif /* CONFIG_440GP */
249
250/* Revision specific EMAC register defaults */
251#ifdef CONFIG_IBM_EMAC4
252#define EMAC_M1_DEFAULT (EMAC_M1_BASE | \
253 EMAC_M1_OPB_CLK_83 | \
254 EMAC_M1_TX_MWSW)
255#define EMAC_RMR_DEFAULT (EMAC_RMR_BASE | \
256 EMAC_RMR_RFAF_128_2048)
257#define EMAC_TMR0_XMIT (EMAC_TMR0_GNP0 | \
258 EMAC_TMR0_DEFAULT)
259#define EMAC_TRTR_DEFAULT EMAC_TRTR_1024
260#else /* !CONFIG_IBM_EMAC4 */
261#define EMAC_M1_DEFAULT EMAC_M1_BASE
262#define EMAC_RMR_DEFAULT EMAC_RMR_BASE
263#define EMAC_TMR0_XMIT EMAC_TMR0_GNP0
264#define EMAC_TRTR_DEFAULT EMAC_TRTR_1600
265#endif /* CONFIG_IBM_EMAC4 */
266
267#endif
diff --git a/drivers/net/ibm_emac/ibm_emac_core.c b/drivers/net/ibm_emac/ibm_emac_core.c
index 14e9b6315f20..943fbd1546ff 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.c
+++ b/drivers/net/ibm_emac/ibm_emac_core.c
@@ -1,13 +1,14 @@
1/* 1/*
2 * ibm_emac_core.c 2 * drivers/net/ibm_emac/ibm_emac_core.c
3 * 3 *
4 * Ethernet driver for the built in ethernet on the IBM 4xx PowerPC 4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 * processors.
6 *
7 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
8 * 5 *
9 * Based on original work by 6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
10 * 8 *
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
11 * Armin Kuster <akuster@mvista.com> 12 * Armin Kuster <akuster@mvista.com>
12 * Johnnie Peters <jpeters@mvista.com> 13 * Johnnie Peters <jpeters@mvista.com>
13 * 14 *
@@ -15,29 +16,24 @@
15 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version. 18 * option) any later version.
18 * TODO 19 *
19 * - Check for races in the "remove" code path
20 * - Add some Power Management to the MAC and the PHY
21 * - Audit remaining of non-rewritten code (--BenH)
22 * - Cleanup message display using msglevel mecanism
23 * - Address all errata
24 * - Audit all register update paths to ensure they
25 * are being written post soft reset if required.
26 */ 20 */
21
22#include <linux/config.h>
27#include <linux/module.h> 23#include <linux/module.h>
28#include <linux/kernel.h> 24#include <linux/kernel.h>
29#include <linux/sched.h> 25#include <linux/sched.h>
30#include <linux/string.h> 26#include <linux/string.h>
31#include <linux/timer.h>
32#include <linux/ptrace.h>
33#include <linux/errno.h> 27#include <linux/errno.h>
34#include <linux/ioport.h>
35#include <linux/slab.h>
36#include <linux/interrupt.h> 28#include <linux/interrupt.h>
37#include <linux/delay.h> 29#include <linux/delay.h>
38#include <linux/init.h> 30#include <linux/init.h>
39#include <linux/types.h> 31#include <linux/types.h>
40#include <linux/dma-mapping.h> 32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/etherdevice.h>
35#include <linux/skbuff.h>
36#include <linux/crc32.h>
41#include <linux/ethtool.h> 37#include <linux/ethtool.h>
42#include <linux/mii.h> 38#include <linux/mii.h>
43#include <linux/bitops.h> 39#include <linux/bitops.h>
@@ -45,1691 +41,1893 @@
45#include <asm/processor.h> 41#include <asm/processor.h>
46#include <asm/io.h> 42#include <asm/io.h>
47#include <asm/dma.h> 43#include <asm/dma.h>
48#include <asm/irq.h>
49#include <asm/uaccess.h> 44#include <asm/uaccess.h>
50#include <asm/ocp.h> 45#include <asm/ocp.h>
51 46
52#include <linux/netdevice.h>
53#include <linux/etherdevice.h>
54#include <linux/skbuff.h>
55#include <linux/crc32.h>
56
57#include "ibm_emac_core.h" 47#include "ibm_emac_core.h"
58 48#include "ibm_emac_debug.h"
59//#define MDIO_DEBUG(fmt) printk fmt
60#define MDIO_DEBUG(fmt)
61
62//#define LINK_DEBUG(fmt) printk fmt
63#define LINK_DEBUG(fmt)
64
65//#define PKT_DEBUG(fmt) printk fmt
66#define PKT_DEBUG(fmt)
67
68#define DRV_NAME "emac"
69#define DRV_VERSION "2.0"
70#define DRV_AUTHOR "Benjamin Herrenschmidt <benh@kernel.crashing.org>"
71#define DRV_DESC "IBM EMAC Ethernet driver"
72 49
73/* 50/*
74 * When mdio_idx >= 0, contains a list of emac ocp_devs 51 * Lack of dma_unmap_???? calls is intentional.
75 * that have had their initialization deferred until the 52 *
76 * common MDIO controller has been initialized. 53 * API-correct usage requires additional support state information to be
54 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
55 * EMAC design (e.g. TX buffer passed from network stack can be split into
56 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
57 * maintaining such information will add additional overhead.
58 * Current DMA API implementation for 4xx processors only ensures cache coherency
59 * and dma_unmap_???? routines are empty and are likely to stay this way.
60 * I decided to omit dma_unmap_??? calls because I don't want to add additional
61 * complexity just for the sake of following some abstract API, when it doesn't
62 * add any real benefit to the driver. I understand that this decision maybe
63 * controversial, but I really tried to make code API-correct and efficient
64 * at the same time and didn't come up with code I liked :(. --ebs
77 */ 65 */
78LIST_HEAD(emac_init_list);
79 66
80MODULE_AUTHOR(DRV_AUTHOR); 67#define DRV_NAME "emac"
68#define DRV_VERSION "3.53"
69#define DRV_DESC "PPC 4xx OCP EMAC driver"
70
81MODULE_DESCRIPTION(DRV_DESC); 71MODULE_DESCRIPTION(DRV_DESC);
72MODULE_AUTHOR
73 ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
82MODULE_LICENSE("GPL"); 74MODULE_LICENSE("GPL");
83 75
84static int skb_res = SKB_RES; 76/* minimum number of free TX descriptors required to wake up TX process */
85module_param(skb_res, int, 0444); 77#define EMAC_TX_WAKEUP_THRESH (NUM_TX_BUFF / 4)
86MODULE_PARM_DESC(skb_res, "Amount of data to reserve on skb buffs\n"
87 "The 405 handles a misaligned IP header fine but\n"
88 "this can help if you are routing to a tunnel or a\n"
89 "device that needs aligned data. 0..2");
90
91#define RGMII_PRIV(ocpdev) ((struct ibm_ocp_rgmii*)ocp_get_drvdata(ocpdev))
92 78
93static unsigned int rgmii_enable[] = { 79/* If packet size is less than this number, we allocate small skb and copy packet
94 RGMII_RTBI, 80 * contents into it instead of just sending original big skb up
95 RGMII_RGMII, 81 */
96 RGMII_TBI, 82#define EMAC_RX_COPY_THRESH CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
97 RGMII_GMII
98};
99 83
100static unsigned int rgmii_speed_mask[] = { 84/* Since multiple EMACs share MDIO lines in various ways, we need
101 RGMII_MII2_SPDMASK, 85 * to avoid re-using the same PHY ID in cases where the arch didn't
102 RGMII_MII3_SPDMASK 86 * setup precise phy_map entries
103}; 87 */
88static u32 busy_phy_map;
104 89
105static unsigned int rgmii_speed100[] = { 90#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && (defined(CONFIG_405EP) || defined(CONFIG_440EP))
106 RGMII_MII2_100MB, 91/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
107 RGMII_MII3_100MB 92 * with PHY RX clock problem.
108}; 93 * 440EP has more sane SDR0_MFR register implementation than 440GX, which
94 * also allows controlling each EMAC clock
95 */
96static inline void EMAC_RX_CLK_TX(int idx)
97{
98 unsigned long flags;
99 local_irq_save(flags);
109 100
110static unsigned int rgmii_speed1000[] = { 101#if defined(CONFIG_405EP)
111 RGMII_MII2_1000MB, 102 mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
112 RGMII_MII3_1000MB 103#else /* CONFIG_440EP */
113}; 104 SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
105#endif
114 106
115#define ZMII_PRIV(ocpdev) ((struct ibm_ocp_zmii*)ocp_get_drvdata(ocpdev)) 107 local_irq_restore(flags);
108}
116 109
117static unsigned int zmii_enable[][4] = { 110static inline void EMAC_RX_CLK_DEFAULT(int idx)
118 {ZMII_SMII0, ZMII_RMII0, ZMII_MII0, 111{
119 ~(ZMII_MDI1 | ZMII_MDI2 | ZMII_MDI3)}, 112 unsigned long flags;
120 {ZMII_SMII1, ZMII_RMII1, ZMII_MII1, 113 local_irq_save(flags);
121 ~(ZMII_MDI0 | ZMII_MDI2 | ZMII_MDI3)},
122 {ZMII_SMII2, ZMII_RMII2, ZMII_MII2,
123 ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI3)},
124 {ZMII_SMII3, ZMII_RMII3, ZMII_MII3, ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI2)}
125};
126 114
127static unsigned int mdi_enable[] = { 115#if defined(CONFIG_405EP)
128 ZMII_MDI0, 116 mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
129 ZMII_MDI1, 117#else /* CONFIG_440EP */
130 ZMII_MDI2, 118 SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
131 ZMII_MDI3 119#endif
132};
133 120
134static unsigned int zmii_speed = 0x0; 121 local_irq_restore(flags);
135static unsigned int zmii_speed100[] = { 122}
136 ZMII_MII0_100MB, 123#else
137 ZMII_MII1_100MB, 124#define EMAC_RX_CLK_TX(idx) ((void)0)
138 ZMII_MII2_100MB, 125#define EMAC_RX_CLK_DEFAULT(idx) ((void)0)
139 ZMII_MII3_100MB 126#endif
140};
141 127
142/* Since multiple EMACs share MDIO lines in various ways, we need 128#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
143 * to avoid re-using the same PHY ID in cases where the arch didn't 129/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
144 * setup precise phy_map entries 130 * unfortunately this is less flexible than 440EP case, because it's a global
131 * setting for all EMACs, therefore we do this clock trick only during probe.
145 */ 132 */
146static u32 busy_phy_map = 0; 133#define EMAC_CLK_INTERNAL SDR_WRITE(DCRN_SDR_MFR, \
134 SDR_READ(DCRN_SDR_MFR) | 0x08000000)
135#define EMAC_CLK_EXTERNAL SDR_WRITE(DCRN_SDR_MFR, \
136 SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
137#else
138#define EMAC_CLK_INTERNAL ((void)0)
139#define EMAC_CLK_EXTERNAL ((void)0)
140#endif
147 141
148/* If EMACs share a common MDIO device, this points to it */ 142/* I don't want to litter system log with timeout errors
149static struct net_device *mdio_ndev = NULL; 143 * when we have brain-damaged PHY.
144 */
145static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
146 const char *error)
147{
148#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
149 DBG("%d: %s" NL, dev->def->index, error);
150#else
151 if (net_ratelimit())
152 printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
153#endif
154}
150 155
151struct emac_def_dev { 156/* PHY polling intervals */
152 struct list_head link; 157#define PHY_POLL_LINK_ON HZ
153 struct ocp_device *ocpdev; 158#define PHY_POLL_LINK_OFF (HZ / 5)
154 struct ibm_ocp_mal *mal; 159
160/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
161static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
162 "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
163 "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
164 "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
165 "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
166 "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
167 "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
168 "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
169 "rx_bad_packet", "rx_runt_packet", "rx_short_event",
170 "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
171 "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
172 "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
173 "tx_bd_excessive_collisions", "tx_bd_late_collision",
174 "tx_bd_multple_collisions", "tx_bd_single_collision",
175 "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
176 "tx_errors"
155}; 177};
156 178
157static struct net_device_stats *emac_stats(struct net_device *dev) 179static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
180static void emac_clean_tx_ring(struct ocp_enet_private *dev);
181
182static inline int emac_phy_supports_gige(int phy_mode)
158{ 183{
159 struct ocp_enet_private *fep = dev->priv; 184 return phy_mode == PHY_MODE_GMII ||
160 return &fep->stats; 185 phy_mode == PHY_MODE_RGMII ||
161}; 186 phy_mode == PHY_MODE_TBI ||
187 phy_mode == PHY_MODE_RTBI;
188}
162 189
163static int 190static inline int emac_phy_gpcs(int phy_mode)
164emac_init_rgmii(struct ocp_device *rgmii_dev, int input, int phy_mode)
165{ 191{
166 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(rgmii_dev); 192 return phy_mode == PHY_MODE_TBI ||
167 const char *mode_name[] = { "RTBI", "RGMII", "TBI", "GMII" }; 193 phy_mode == PHY_MODE_RTBI;
168 int mode = -1; 194}
169 195
170 if (!rgmii) { 196static inline void emac_tx_enable(struct ocp_enet_private *dev)
171 rgmii = kmalloc(sizeof(struct ibm_ocp_rgmii), GFP_KERNEL); 197{
198 struct emac_regs *p = dev->emacp;
199 unsigned long flags;
200 u32 r;
172 201
173 if (rgmii == NULL) { 202 local_irq_save(flags);
174 printk(KERN_ERR
175 "rgmii%d: Out of memory allocating RGMII structure!\n",
176 rgmii_dev->def->index);
177 return -ENOMEM;
178 }
179 203
180 memset(rgmii, 0, sizeof(*rgmii)); 204 DBG("%d: tx_enable" NL, dev->def->index);
181 205
182 rgmii->base = 206 r = in_be32(&p->mr0);
183 (struct rgmii_regs *)ioremap(rgmii_dev->def->paddr, 207 if (!(r & EMAC_MR0_TXE))
184 sizeof(*rgmii->base)); 208 out_be32(&p->mr0, r | EMAC_MR0_TXE);
185 if (rgmii->base == NULL) { 209 local_irq_restore(flags);
186 printk(KERN_ERR 210}
187 "rgmii%d: Cannot ioremap bridge registers!\n",
188 rgmii_dev->def->index);
189 211
190 kfree(rgmii); 212static void emac_tx_disable(struct ocp_enet_private *dev)
191 return -ENOMEM; 213{
192 } 214 struct emac_regs *p = dev->emacp;
193 ocp_set_drvdata(rgmii_dev, rgmii); 215 unsigned long flags;
194 } 216 u32 r;
195 217
196 if (phy_mode) { 218 local_irq_save(flags);
197 switch (phy_mode) { 219
198 case PHY_MODE_GMII: 220 DBG("%d: tx_disable" NL, dev->def->index);
199 mode = GMII;
200 break;
201 case PHY_MODE_TBI:
202 mode = TBI;
203 break;
204 case PHY_MODE_RTBI:
205 mode = RTBI;
206 break;
207 case PHY_MODE_RGMII:
208 default:
209 mode = RGMII;
210 }
211 rgmii->base->fer &= ~RGMII_FER_MASK(input);
212 rgmii->base->fer |= rgmii_enable[mode] << (4 * input);
213 } else {
214 switch ((rgmii->base->fer & RGMII_FER_MASK(input)) >> (4 *
215 input)) {
216 case RGMII_RTBI:
217 mode = RTBI;
218 break;
219 case RGMII_RGMII:
220 mode = RGMII;
221 break;
222 case RGMII_TBI:
223 mode = TBI;
224 break;
225 case RGMII_GMII:
226 mode = GMII;
227 }
228 }
229 221
230 /* Set mode to RGMII if nothing valid is detected */ 222 r = in_be32(&p->mr0);
231 if (mode < 0) 223 if (r & EMAC_MR0_TXE) {
232 mode = RGMII; 224 int n = 300;
225 out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
226 while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
227 --n;
228 if (unlikely(!n))
229 emac_report_timeout_error(dev, "TX disable timeout");
230 }
231 local_irq_restore(flags);
232}
233 233
234 printk(KERN_NOTICE "rgmii%d: input %d in %s mode\n", 234static void emac_rx_enable(struct ocp_enet_private *dev)
235 rgmii_dev->def->index, input, mode_name[mode]); 235{
236 struct emac_regs *p = dev->emacp;
237 unsigned long flags;
238 u32 r;
236 239
237 rgmii->mode[input] = mode; 240 local_irq_save(flags);
238 rgmii->users++; 241 if (unlikely(dev->commac.rx_stopped))
242 goto out;
239 243
240 return 0; 244 DBG("%d: rx_enable" NL, dev->def->index);
245
246 r = in_be32(&p->mr0);
247 if (!(r & EMAC_MR0_RXE)) {
248 if (unlikely(!(r & EMAC_MR0_RXI))) {
249 /* Wait if previous async disable is still in progress */
250 int n = 100;
251 while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
252 --n;
253 if (unlikely(!n))
254 emac_report_timeout_error(dev,
255 "RX disable timeout");
256 }
257 out_be32(&p->mr0, r | EMAC_MR0_RXE);
258 }
259 out:
260 local_irq_restore(flags);
241} 261}
242 262
243static void 263static void emac_rx_disable(struct ocp_enet_private *dev)
244emac_rgmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
245{ 264{
246 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev); 265 struct emac_regs *p = dev->emacp;
247 unsigned int rgmii_speed; 266 unsigned long flags;
248 267 u32 r;
249 rgmii_speed = in_be32(&rgmii->base->ssr);
250 268
251 rgmii_speed &= ~rgmii_speed_mask[input]; 269 local_irq_save(flags);
252 270
253 if (speed == 1000) 271 DBG("%d: rx_disable" NL, dev->def->index);
254 rgmii_speed |= rgmii_speed1000[input];
255 else if (speed == 100)
256 rgmii_speed |= rgmii_speed100[input];
257 272
258 out_be32(&rgmii->base->ssr, rgmii_speed); 273 r = in_be32(&p->mr0);
274 if (r & EMAC_MR0_RXE) {
275 int n = 300;
276 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
277 while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
278 --n;
279 if (unlikely(!n))
280 emac_report_timeout_error(dev, "RX disable timeout");
281 }
282 local_irq_restore(flags);
259} 283}
260 284
261static void emac_close_rgmii(struct ocp_device *ocpdev) 285static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
262{ 286{
263 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(ocpdev); 287 struct emac_regs *p = dev->emacp;
264 BUG_ON(!rgmii || rgmii->users == 0); 288 unsigned long flags;
289 u32 r;
265 290
266 if (!--rgmii->users) { 291 local_irq_save(flags);
267 ocp_set_drvdata(ocpdev, NULL); 292
268 iounmap((void *)rgmii->base); 293 DBG("%d: rx_disable_async" NL, dev->def->index);
269 kfree(rgmii); 294
270 } 295 r = in_be32(&p->mr0);
296 if (r & EMAC_MR0_RXE)
297 out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
298 local_irq_restore(flags);
271} 299}
272 300
273static int emac_init_zmii(struct ocp_device *zmii_dev, int input, int phy_mode) 301static int emac_reset(struct ocp_enet_private *dev)
274{ 302{
275 struct ibm_ocp_zmii *zmii = ZMII_PRIV(zmii_dev); 303 struct emac_regs *p = dev->emacp;
276 const char *mode_name[] = { "SMII", "RMII", "MII" }; 304 unsigned long flags;
277 int mode = -1; 305 int n = 20;
278 306
279 if (!zmii) { 307 DBG("%d: reset" NL, dev->def->index);
280 zmii = kmalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
281 if (zmii == NULL) {
282 printk(KERN_ERR
283 "zmii%d: Out of memory allocating ZMII structure!\n",
284 zmii_dev->def->index);
285 return -ENOMEM;
286 }
287 memset(zmii, 0, sizeof(*zmii));
288 308
289 zmii->base = 309 local_irq_save(flags);
290 (struct zmii_regs *)ioremap(zmii_dev->def->paddr,
291 sizeof(*zmii->base));
292 if (zmii->base == NULL) {
293 printk(KERN_ERR
294 "zmii%d: Cannot ioremap bridge registers!\n",
295 zmii_dev->def->index);
296 310
297 kfree(zmii); 311 if (!dev->reset_failed) {
298 return -ENOMEM; 312 /* 40x erratum suggests stopping RX channel before reset,
299 } 313 * we stop TX as well
300 ocp_set_drvdata(zmii_dev, zmii); 314 */
315 emac_rx_disable(dev);
316 emac_tx_disable(dev);
301 } 317 }
302 318
303 if (phy_mode) { 319 out_be32(&p->mr0, EMAC_MR0_SRST);
304 switch (phy_mode) { 320 while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
305 case PHY_MODE_MII: 321 --n;
306 mode = MII; 322 local_irq_restore(flags);
307 break; 323
308 case PHY_MODE_RMII: 324 if (n) {
309 mode = RMII; 325 dev->reset_failed = 0;
310 break; 326 return 0;
311 case PHY_MODE_SMII:
312 default:
313 mode = SMII;
314 }
315 zmii->base->fer &= ~ZMII_FER_MASK(input);
316 zmii->base->fer |= zmii_enable[input][mode];
317 } else { 327 } else {
318 switch ((zmii->base->fer & ZMII_FER_MASK(input)) << (4 * input)) { 328 emac_report_timeout_error(dev, "reset timeout");
319 case ZMII_MII0: 329 dev->reset_failed = 1;
320 mode = MII; 330 return -ETIMEDOUT;
321 break;
322 case ZMII_RMII0:
323 mode = RMII;
324 break;
325 case ZMII_SMII0:
326 mode = SMII;
327 }
328 } 331 }
332}
329 333
330 /* Set mode to SMII if nothing valid is detected */ 334static void emac_hash_mc(struct ocp_enet_private *dev)
331 if (mode < 0) 335{
332 mode = SMII; 336 struct emac_regs *p = dev->emacp;
337 u16 gaht[4] = { 0 };
338 struct dev_mc_list *dmi;
333 339
334 printk(KERN_NOTICE "zmii%d: input %d in %s mode\n", 340 DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);
335 zmii_dev->def->index, input, mode_name[mode]);
336 341
337 zmii->mode[input] = mode; 342 for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
338 zmii->users++; 343 int bit;
344 DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
345 dev->def->index,
346 dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
347 dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
339 348
340 return 0; 349 bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
350 gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
351 }
352 out_be32(&p->gaht1, gaht[0]);
353 out_be32(&p->gaht2, gaht[1]);
354 out_be32(&p->gaht3, gaht[2]);
355 out_be32(&p->gaht4, gaht[3]);
341} 356}
342 357
343static void emac_enable_zmii_port(struct ocp_device *ocpdev, int input) 358static inline u32 emac_iff2rmr(struct net_device *ndev)
344{ 359{
345 u32 mask; 360 u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
346 struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev); 361 EMAC_RMR_BASE;
347 362
348 mask = in_be32(&zmii->base->fer); 363 if (ndev->flags & IFF_PROMISC)
349 mask &= zmii_enable[input][MDI]; /* turn all non enabled MDI's off */ 364 r |= EMAC_RMR_PME;
350 mask |= zmii_enable[input][zmii->mode[input]] | mdi_enable[input]; 365 else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
351 out_be32(&zmii->base->fer, mask); 366 r |= EMAC_RMR_PMME;
367 else if (ndev->mc_count > 0)
368 r |= EMAC_RMR_MAE;
369
370 return r;
352} 371}
353 372
354static void 373static inline int emac_opb_mhz(void)
355emac_zmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
356{ 374{
357 struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev); 375 return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
358
359 if (speed == 100)
360 zmii_speed |= zmii_speed100[input];
361 else
362 zmii_speed &= ~zmii_speed100[input];
363
364 out_be32(&zmii->base->ssr, zmii_speed);
365} 376}
366 377
367static void emac_close_zmii(struct ocp_device *ocpdev) 378/* BHs disabled */
379static int emac_configure(struct ocp_enet_private *dev)
368{ 380{
369 struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev); 381 struct emac_regs *p = dev->emacp;
370 BUG_ON(!zmii || zmii->users == 0); 382 struct net_device *ndev = dev->ndev;
383 int gige;
384 u32 r;
371 385
372 if (!--zmii->users) { 386 DBG("%d: configure" NL, dev->def->index);
373 ocp_set_drvdata(ocpdev, NULL);
374 iounmap((void *)zmii->base);
375 kfree(zmii);
376 }
377}
378 387
379int emac_phy_read(struct net_device *dev, int mii_id, int reg) 388 if (emac_reset(dev) < 0)
380{ 389 return -ETIMEDOUT;
381 int count;
382 uint32_t stacr;
383 struct ocp_enet_private *fep = dev->priv;
384 emac_t *emacp = fep->emacp;
385 390
386 MDIO_DEBUG(("%s: phy_read, id: 0x%x, reg: 0x%x\n", dev->name, mii_id, 391 tah_reset(dev->tah_dev);
387 reg));
388 392
389 /* Enable proper ZMII port */ 393 /* Mode register */
390 if (fep->zmii_dev) 394 r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
391 emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input); 395 if (dev->phy.duplex == DUPLEX_FULL)
396 r |= EMAC_MR1_FDE;
397 switch (dev->phy.speed) {
398 case SPEED_1000:
399 if (emac_phy_gpcs(dev->phy.mode)) {
400 r |= EMAC_MR1_MF_1000GPCS |
401 EMAC_MR1_MF_IPPA(dev->phy.address);
392 402
393 /* Use the EMAC that has the MDIO port */ 403 /* Put some arbitrary OUI, Manuf & Rev IDs so we can
394 if (fep->mdio_dev) { 404 * identify this GPCS PHY later.
395 dev = fep->mdio_dev; 405 */
396 fep = dev->priv; 406 out_be32(&p->ipcr, 0xdeadbeef);
397 emacp = fep->emacp; 407 } else
408 r |= EMAC_MR1_MF_1000;
409 r |= EMAC_MR1_RFS_16K;
410 gige = 1;
411
412 if (dev->ndev->mtu > ETH_DATA_LEN)
413 r |= EMAC_MR1_JPSM;
414 break;
415 case SPEED_100:
416 r |= EMAC_MR1_MF_100;
417 /* Fall through */
418 default:
419 r |= EMAC_MR1_RFS_4K;
420 gige = 0;
421 break;
398 } 422 }
399 423
400 count = 0; 424 if (dev->rgmii_dev)
401 while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0) 425 rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
402 && (count++ < MDIO_DELAY)) 426 dev->phy.speed);
403 udelay(1); 427 else
404 MDIO_DEBUG((" (count was %d)\n", count)); 428 zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);
405 429
406 if ((stacr & EMAC_STACR_OC) == 0) { 430#if !defined(CONFIG_40x)
407 printk(KERN_WARNING "%s: PHY read timeout #1!\n", dev->name); 431 /* on 40x erratum forces us to NOT use integrated flow control,
408 return -1; 432 * let's hope it works on 44x ;)
433 */
434 if (dev->phy.duplex == DUPLEX_FULL) {
435 if (dev->phy.pause)
436 r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
437 else if (dev->phy.asym_pause)
438 r |= EMAC_MR1_APP;
409 } 439 }
440#endif
441 out_be32(&p->mr1, r);
442
443 /* Set individual MAC address */
444 out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
445 out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
446 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
447 ndev->dev_addr[5]);
448
449 /* VLAN Tag Protocol ID */
450 out_be32(&p->vtpid, 0x8100);
451
452 /* Receive mode register */
453 r = emac_iff2rmr(ndev);
454 if (r & EMAC_RMR_MAE)
455 emac_hash_mc(dev);
456 out_be32(&p->rmr, r);
457
458 /* FIFOs thresholds */
459 r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
460 EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
461 out_be32(&p->tmr1, r);
462 out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));
463
464 /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
465 there should be still enough space in FIFO to allow the our link
466 partner time to process this frame and also time to send PAUSE
467 frame itself.
468
469 Here is the worst case scenario for the RX FIFO "headroom"
470 (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):
471
472 1) One maximum-length frame on TX 1522 bytes
473 2) One PAUSE frame time 64 bytes
474 3) PAUSE frame decode time allowance 64 bytes
475 4) One maximum-length frame on RX 1522 bytes
476 5) Round-trip propagation delay of the link (100Mb) 15 bytes
477 ----------
478 3187 bytes
479
480 I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
481 low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
482 */
483 r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
484 EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
485 out_be32(&p->rwmr, r);
486
487 /* Set PAUSE timer to the maximum */
488 out_be32(&p->ptr, 0xffff);
489
490 /* IRQ sources */
491 out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
492 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
493 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
494 EMAC_ISR_IRE | EMAC_ISR_TE);
495
496 /* We need to take GPCS PHY out of isolate mode after EMAC reset */
497 if (emac_phy_gpcs(dev->phy.mode))
498 mii_reset_phy(&dev->phy);
499
500 return 0;
501}
410 502
411 /* Clear the speed bits and make a read request to the PHY */ 503/* BHs disabled */
412 stacr = ((EMAC_STACR_READ | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ); 504static void emac_reinitialize(struct ocp_enet_private *dev)
413 stacr |= ((mii_id & 0x1F) << 5); 505{
506 DBG("%d: reinitialize" NL, dev->def->index);
414 507
415 out_be32(&emacp->em0stacr, stacr); 508 if (!emac_configure(dev)) {
509 emac_tx_enable(dev);
510 emac_rx_enable(dev);
511 }
512}
416 513
417 count = 0; 514/* BHs disabled */
418 while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0) 515static void emac_full_tx_reset(struct net_device *ndev)
419 && (count++ < MDIO_DELAY)) 516{
420 udelay(1); 517 struct ocp_enet_private *dev = ndev->priv;
421 MDIO_DEBUG((" (count was %d)\n", count)); 518 struct ocp_func_emac_data *emacdata = dev->def->additions;
422 519
423 if ((stacr & EMAC_STACR_OC) == 0) { 520 DBG("%d: full_tx_reset" NL, dev->def->index);
424 printk(KERN_WARNING "%s: PHY read timeout #2!\n", dev->name);
425 return -1;
426 }
427 521
428 /* Check for a read error */ 522 emac_tx_disable(dev);
429 if (stacr & EMAC_STACR_PHYE) { 523 mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
430 MDIO_DEBUG(("EMAC MDIO PHY error !\n")); 524 emac_clean_tx_ring(dev);
431 return -1; 525 dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;
432 }
433 526
434 MDIO_DEBUG((" -> 0x%x\n", stacr >> 16)); 527 emac_configure(dev);
435 528
436 return (stacr >> 16); 529 mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
530 emac_tx_enable(dev);
531 emac_rx_enable(dev);
532
533 netif_wake_queue(ndev);
437} 534}
438 535
439void emac_phy_write(struct net_device *dev, int mii_id, int reg, int data) 536static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
440{ 537{
441 int count; 538 struct emac_regs *p = dev->emacp;
442 uint32_t stacr; 539 u32 r;
443 struct ocp_enet_private *fep = dev->priv; 540 int n;
444 emac_t *emacp = fep->emacp;
445 541
446 MDIO_DEBUG(("%s phy_write, id: 0x%x, reg: 0x%x, data: 0x%x\n", 542 DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);
447 dev->name, mii_id, reg, data));
448 543
449 /* Enable proper ZMII port */ 544 /* Enable proper MDIO port */
450 if (fep->zmii_dev) 545 zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
451 emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);
452 546
453 /* Use the EMAC that has the MDIO port */ 547 /* Wait for management interface to become idle */
454 if (fep->mdio_dev) { 548 n = 10;
455 dev = fep->mdio_dev; 549 while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
456 fep = dev->priv; 550 udelay(1);
457 emacp = fep->emacp; 551 if (!--n)
552 goto to;
458 } 553 }
459 554
460 count = 0; 555 /* Issue read command */
461 while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0) 556 out_be32(&p->stacr,
462 && (count++ < MDIO_DELAY)) 557 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
558 (reg & EMAC_STACR_PRA_MASK)
559 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT));
560
561 /* Wait for read to complete */
562 n = 100;
563 while (!((r = in_be32(&p->stacr)) & EMAC_STACR_OC)) {
463 udelay(1); 564 udelay(1);
464 MDIO_DEBUG((" (count was %d)\n", count)); 565 if (!--n)
566 goto to;
567 }
465 568
466 if ((stacr & EMAC_STACR_OC) == 0) { 569 if (unlikely(r & EMAC_STACR_PHYE)) {
467 printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name); 570 DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
468 return; 571 id, reg);
572 return -EREMOTEIO;
469 } 573 }
470 574
471 /* Clear the speed bits and make a read request to the PHY */ 575 r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
576 DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
577 return r;
578 to:
579 DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
580 return -ETIMEDOUT;
581}
472 582
473 stacr = ((EMAC_STACR_WRITE | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ); 583static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
474 stacr |= ((mii_id & 0x1f) << 5) | ((data & 0xffff) << 16); 584 u16 val)
585{
586 struct emac_regs *p = dev->emacp;
587 int n;
475 588
476 out_be32(&emacp->em0stacr, stacr); 589 DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
590 val);
477 591
478 count = 0; 592 /* Enable proper MDIO port */
479 while ((((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC) == 0) 593 zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);
480 && (count++ < MDIO_DELAY)) 594
595 /* Wait for management interface to be idle */
596 n = 10;
597 while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
481 udelay(1); 598 udelay(1);
482 MDIO_DEBUG((" (count was %d)\n", count)); 599 if (!--n)
600 goto to;
601 }
483 602
484 if ((stacr & EMAC_STACR_OC) == 0) 603 /* Issue write command */
485 printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name); 604 out_be32(&p->stacr,
605 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
606 (reg & EMAC_STACR_PRA_MASK) |
607 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
608 (val << EMAC_STACR_PHYD_SHIFT));
486 609
487 /* Check for a write error */ 610 /* Wait for write to complete */
488 if ((stacr & EMAC_STACR_PHYE) != 0) { 611 n = 100;
489 MDIO_DEBUG(("EMAC MDIO PHY error !\n")); 612 while (!(in_be32(&p->stacr) & EMAC_STACR_OC)) {
613 udelay(1);
614 if (!--n)
615 goto to;
490 } 616 }
617 return;
618 to:
619 DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
491} 620}
492 621
493static void emac_txeob_dev(void *param, u32 chanmask) 622static int emac_mdio_read(struct net_device *ndev, int id, int reg)
494{ 623{
495 struct net_device *dev = param; 624 struct ocp_enet_private *dev = ndev->priv;
496 struct ocp_enet_private *fep = dev->priv; 625 int res;
497 unsigned long flags; 626
498 627 local_bh_disable();
499 spin_lock_irqsave(&fep->lock, flags); 628 res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
500 629 (u8) reg);
501 PKT_DEBUG(("emac_txeob_dev() entry, tx_cnt: %d\n", fep->tx_cnt)); 630 local_bh_enable();
502 631 return res;
503 while (fep->tx_cnt && 632}
504 !(fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_READY)) {
505 633
506 if (fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_LAST) { 634static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
507 /* Tell the system the transmit completed. */ 635{
508 dma_unmap_single(&fep->ocpdev->dev, 636 struct ocp_enet_private *dev = ndev->priv;
509 fep->tx_desc[fep->ack_slot].data_ptr,
510 fep->tx_desc[fep->ack_slot].data_len,
511 DMA_TO_DEVICE);
512 dev_kfree_skb_irq(fep->tx_skb[fep->ack_slot]);
513 637
514 if (fep->tx_desc[fep->ack_slot].ctrl & 638 local_bh_disable();
515 (EMAC_TX_ST_EC | EMAC_TX_ST_MC | EMAC_TX_ST_SC)) 639 __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
516 fep->stats.collisions++; 640 (u8) reg, (u16) val);
517 } 641 local_bh_enable();
642}
518 643
519 fep->tx_skb[fep->ack_slot] = (struct sk_buff *)NULL; 644/* BHs disabled */
520 if (++fep->ack_slot == NUM_TX_BUFF) 645static void emac_set_multicast_list(struct net_device *ndev)
521 fep->ack_slot = 0; 646{
647 struct ocp_enet_private *dev = ndev->priv;
648 struct emac_regs *p = dev->emacp;
649 u32 rmr = emac_iff2rmr(ndev);
650
651 DBG("%d: multicast %08x" NL, dev->def->index, rmr);
652 BUG_ON(!netif_running(dev->ndev));
653
654 /* I decided to relax register access rules here to avoid
655 * full EMAC reset.
656 *
657 * There is a real problem with EMAC4 core if we use MWSW_001 bit
658 * in MR1 register and do a full EMAC reset.
659 * One TX BD status update is delayed and, after EMAC reset, it
660 * never happens, resulting in TX hung (it'll be recovered by TX
661 * timeout handler eventually, but this is just gross).
662 * So we either have to do full TX reset or try to cheat here :)
663 *
664 * The only required change is to RX mode register, so I *think* all
665 * we need is just to stop RX channel. This seems to work on all
666 * tested SoCs. --ebs
667 */
668 emac_rx_disable(dev);
669 if (rmr & EMAC_RMR_MAE)
670 emac_hash_mc(dev);
671 out_be32(&p->rmr, rmr);
672 emac_rx_enable(dev);
673}
522 674
523 fep->tx_cnt--; 675/* BHs disabled */
676static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
677{
678 struct ocp_func_emac_data *emacdata = dev->def->additions;
679 int rx_sync_size = emac_rx_sync_size(new_mtu);
680 int rx_skb_size = emac_rx_skb_size(new_mtu);
681 int i, ret = 0;
682
683 emac_rx_disable(dev);
684 mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
685
686 if (dev->rx_sg_skb) {
687 ++dev->estats.rx_dropped_resize;
688 dev_kfree_skb(dev->rx_sg_skb);
689 dev->rx_sg_skb = NULL;
524 } 690 }
525 if (fep->tx_cnt < NUM_TX_BUFF)
526 netif_wake_queue(dev);
527 691
528 PKT_DEBUG(("emac_txeob_dev() exit, tx_cnt: %d\n", fep->tx_cnt)); 692 /* Make a first pass over RX ring and mark BDs ready, dropping
693 * non-processed packets on the way. We need this as a separate pass
694 * to simplify error recovery in the case of allocation failure later.
695 */
696 for (i = 0; i < NUM_RX_BUFF; ++i) {
697 if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
698 ++dev->estats.rx_dropped_resize;
529 699
530 spin_unlock_irqrestore(&fep->lock, flags); 700 dev->rx_desc[i].data_len = 0;
531} 701 dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
702 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
703 }
532 704
533/* 705 /* Reallocate RX ring only if bigger skb buffers are required */
534 Fill/Re-fill the rx chain with valid ctrl/ptrs. 706 if (rx_skb_size <= dev->rx_skb_size)
535 This function will fill from rx_slot up to the parm end. 707 goto skip;
536 So to completely fill the chain pre-set rx_slot to 0 and
537 pass in an end of 0.
538 */
539static void emac_rx_fill(struct net_device *dev, int end)
540{
541 int i;
542 struct ocp_enet_private *fep = dev->priv;
543
544 i = fep->rx_slot;
545 do {
546 /* We don't want the 16 bytes skb_reserve done by dev_alloc_skb,
547 * it breaks our cache line alignement. However, we still allocate
548 * +16 so that we end up allocating the exact same size as
549 * dev_alloc_skb() would do.
550 * Also, because of the skb_res, the max DMA size we give to EMAC
551 * is slighly wrong, causing it to potentially DMA 2 more bytes
552 * from a broken/oversized packet. These 16 bytes will take care
553 * that we don't walk on somebody else toes with that.
554 */
555 fep->rx_skb[i] =
556 alloc_skb(fep->rx_buffer_size + 16, GFP_ATOMIC);
557
558 if (fep->rx_skb[i] == NULL) {
559 /* Keep rx_slot here, the next time clean/fill is called
560 * we will try again before the MAL wraps back here
561 * If the MAL tries to use this descriptor with
562 * the EMPTY bit off it will cause the
563 * rxde interrupt. That is where we will
564 * try again to allocate an sk_buff.
565 */
566 break;
567 708
709 /* Second pass, allocate new skbs */
710 for (i = 0; i < NUM_RX_BUFF; ++i) {
711 struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
712 if (!skb) {
713 ret = -ENOMEM;
714 goto oom;
568 } 715 }
569 716
570 if (skb_res) 717 BUG_ON(!dev->rx_skb[i]);
571 skb_reserve(fep->rx_skb[i], skb_res); 718 dev_kfree_skb(dev->rx_skb[i]);
572 719
573 /* We must NOT dma_map_single the cache line right after the 720 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
574 * buffer, so we must crop our sync size to account for the 721 dev->rx_desc[i].data_ptr =
575 * reserved space 722 dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
576 */ 723 DMA_FROM_DEVICE) + 2;
577 fep->rx_desc[i].data_ptr = 724 dev->rx_skb[i] = skb;
578 (unsigned char *)dma_map_single(&fep->ocpdev->dev, 725 }
579 (void *)fep->rx_skb[i]-> 726 skip:
580 data, 727 /* Check if we need to change "Jumbo" bit in MR1 */
581 fep->rx_buffer_size - 728 if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
582 skb_res, DMA_FROM_DEVICE); 729 /* This is to prevent starting RX channel in emac_rx_enable() */
583 730 dev->commac.rx_stopped = 1;
584 /* 731
585 * Some 4xx implementations use the previously 732 dev->ndev->mtu = new_mtu;
586 * reserved bits in data_len to encode the MS 733 emac_full_tx_reset(dev->ndev);
587 * 4-bits of a 36-bit physical address (ERPN) 734 }
588 * This must be initialized.
589 */
590 fep->rx_desc[i].data_len = 0;
591 fep->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR |
592 (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
593 735
594 } while ((i = (i + 1) % NUM_RX_BUFF) != end); 736 mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
737 oom:
738 /* Restart RX */
739 dev->commac.rx_stopped = dev->rx_slot = 0;
740 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
741 emac_rx_enable(dev);
595 742
596 fep->rx_slot = i; 743 return ret;
597} 744}
598 745
599static void 746/* Process ctx, rtnl_lock semaphore */
600emac_rx_csum(struct net_device *dev, unsigned short ctrl, struct sk_buff *skb) 747static int emac_change_mtu(struct net_device *ndev, int new_mtu)
601{ 748{
602 struct ocp_enet_private *fep = dev->priv; 749 struct ocp_enet_private *dev = ndev->priv;
750 int ret = 0;
603 751
604 /* Exit if interface has no TAH engine */ 752 if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
605 if (!fep->tah_dev) { 753 return -EINVAL;
606 skb->ip_summed = CHECKSUM_NONE;
607 return;
608 }
609 754
610 /* Check for TCP/UDP/IP csum error */ 755 DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);
611 if (ctrl & EMAC_CSUM_VER_ERROR) {
612 /* Let the stack verify checksum errors */
613 skb->ip_summed = CHECKSUM_NONE;
614/* adapter->hw_csum_err++; */
615 } else {
616 /* Csum is good */
617 skb->ip_summed = CHECKSUM_UNNECESSARY;
618/* adapter->hw_csum_good++; */
619 }
620}
621 756
622static int emac_rx_clean(struct net_device *dev) 757 local_bh_disable();
623{ 758 if (netif_running(ndev)) {
624 int i, b, bnum = 0, buf[6]; 759 /* Check if we really need to reinitalize RX ring */
625 int error, frame_length; 760 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
626 struct ocp_enet_private *fep = dev->priv; 761 ret = emac_resize_rx_ring(dev, new_mtu);
627 unsigned short ctrl; 762 }
628 763
629 i = fep->rx_slot; 764 if (!ret) {
765 ndev->mtu = new_mtu;
766 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
767 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
768 }
769 local_bh_enable();
630 770
631 PKT_DEBUG(("emac_rx_clean() entry, rx_slot: %d\n", fep->rx_slot)); 771 return ret;
772}
632 773
633 do { 774static void emac_clean_tx_ring(struct ocp_enet_private *dev)
634 if (fep->rx_skb[i] == NULL) 775{
635 continue; /*we have already handled the packet but haved failed to alloc */ 776 int i;
636 /* 777 for (i = 0; i < NUM_TX_BUFF; ++i) {
637 since rx_desc is in uncached mem we don't keep reading it directly 778 if (dev->tx_skb[i]) {
638 we pull out a local copy of ctrl and do the checks on the copy. 779 dev_kfree_skb(dev->tx_skb[i]);
639 */ 780 dev->tx_skb[i] = NULL;
640 ctrl = fep->rx_desc[i].ctrl; 781 if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
641 if (ctrl & MAL_RX_CTRL_EMPTY) 782 ++dev->estats.tx_dropped;
642 break; /*we don't have any more ready packets */
643
644 if (EMAC_IS_BAD_RX_PACKET(ctrl)) {
645 fep->stats.rx_errors++;
646 fep->stats.rx_dropped++;
647
648 if (ctrl & EMAC_RX_ST_OE)
649 fep->stats.rx_fifo_errors++;
650 if (ctrl & EMAC_RX_ST_AE)
651 fep->stats.rx_frame_errors++;
652 if (ctrl & EMAC_RX_ST_BFCS)
653 fep->stats.rx_crc_errors++;
654 if (ctrl & (EMAC_RX_ST_RP | EMAC_RX_ST_PTL |
655 EMAC_RX_ST_ORE | EMAC_RX_ST_IRE))
656 fep->stats.rx_length_errors++;
657 } else {
658 if ((ctrl & (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) ==
659 (MAL_RX_CTRL_FIRST | MAL_RX_CTRL_LAST)) {
660 /* Single descriptor packet */
661 emac_rx_csum(dev, ctrl, fep->rx_skb[i]);
662 /* Send the skb up the chain. */
663 frame_length = fep->rx_desc[i].data_len - 4;
664 skb_put(fep->rx_skb[i], frame_length);
665 fep->rx_skb[i]->dev = dev;
666 fep->rx_skb[i]->protocol =
667 eth_type_trans(fep->rx_skb[i], dev);
668 error = netif_rx(fep->rx_skb[i]);
669
670 if ((error == NET_RX_DROP) ||
671 (error == NET_RX_BAD)) {
672 fep->stats.rx_dropped++;
673 } else {
674 fep->stats.rx_packets++;
675 fep->stats.rx_bytes += frame_length;
676 }
677 fep->rx_skb[i] = NULL;
678 } else {
679 /* Multiple descriptor packet */
680 if (ctrl & MAL_RX_CTRL_FIRST) {
681 if (fep->rx_desc[(i + 1) % NUM_RX_BUFF].
682 ctrl & MAL_RX_CTRL_EMPTY)
683 break;
684 bnum = 0;
685 buf[bnum] = i;
686 ++bnum;
687 continue;
688 }
689 if (((ctrl & MAL_RX_CTRL_FIRST) !=
690 MAL_RX_CTRL_FIRST) &&
691 ((ctrl & MAL_RX_CTRL_LAST) !=
692 MAL_RX_CTRL_LAST)) {
693 if (fep->rx_desc[(i + 1) %
694 NUM_RX_BUFF].ctrl &
695 MAL_RX_CTRL_EMPTY) {
696 i = buf[0];
697 break;
698 }
699 buf[bnum] = i;
700 ++bnum;
701 continue;
702 }
703 if (ctrl & MAL_RX_CTRL_LAST) {
704 buf[bnum] = i;
705 ++bnum;
706 skb_put(fep->rx_skb[buf[0]],
707 fep->rx_desc[buf[0]].data_len);
708 for (b = 1; b < bnum; b++) {
709 /*
710 * MAL is braindead, we need
711 * to copy the remainder
712 * of the packet from the
713 * latter descriptor buffers
714 * to the first skb. Then
715 * dispose of the source
716 * skbs.
717 *
718 * Once the stack is fixed
719 * to handle frags on most
720 * protocols we can generate
721 * a fragmented skb with
722 * no copies.
723 */
724 memcpy(fep->rx_skb[buf[0]]->
725 data +
726 fep->rx_skb[buf[0]]->len,
727 fep->rx_skb[buf[b]]->
728 data,
729 fep->rx_desc[buf[b]].
730 data_len);
731 skb_put(fep->rx_skb[buf[0]],
732 fep->rx_desc[buf[b]].
733 data_len);
734 dma_unmap_single(&fep->ocpdev->
735 dev,
736 fep->
737 rx_desc[buf
738 [b]].
739 data_ptr,
740 fep->
741 rx_desc[buf
742 [b]].
743 data_len,
744 DMA_FROM_DEVICE);
745 dev_kfree_skb(fep->
746 rx_skb[buf[b]]);
747 }
748 emac_rx_csum(dev, ctrl,
749 fep->rx_skb[buf[0]]);
750
751 fep->rx_skb[buf[0]]->dev = dev;
752 fep->rx_skb[buf[0]]->protocol =
753 eth_type_trans(fep->rx_skb[buf[0]],
754 dev);
755 error = netif_rx(fep->rx_skb[buf[0]]);
756
757 if ((error == NET_RX_DROP)
758 || (error == NET_RX_BAD)) {
759 fep->stats.rx_dropped++;
760 } else {
761 fep->stats.rx_packets++;
762 fep->stats.rx_bytes +=
763 fep->rx_skb[buf[0]]->len;
764 }
765 for (b = 0; b < bnum; b++)
766 fep->rx_skb[buf[b]] = NULL;
767 }
768 }
769 } 783 }
770 } while ((i = (i + 1) % NUM_RX_BUFF) != fep->rx_slot); 784 dev->tx_desc[i].ctrl = 0;
771 785 dev->tx_desc[i].data_ptr = 0;
772 PKT_DEBUG(("emac_rx_clean() exit, rx_slot: %d\n", fep->rx_slot)); 786 }
773
774 return i;
775} 787}
776 788
777static void emac_rxeob_dev(void *param, u32 chanmask) 789static void emac_clean_rx_ring(struct ocp_enet_private *dev)
778{ 790{
779 struct net_device *dev = param; 791 int i;
780 struct ocp_enet_private *fep = dev->priv; 792 for (i = 0; i < NUM_RX_BUFF; ++i)
781 unsigned long flags; 793 if (dev->rx_skb[i]) {
782 int n; 794 dev->rx_desc[i].ctrl = 0;
795 dev_kfree_skb(dev->rx_skb[i]);
796 dev->rx_skb[i] = NULL;
797 dev->rx_desc[i].data_ptr = 0;
798 }
783 799
784 spin_lock_irqsave(&fep->lock, flags); 800 if (dev->rx_sg_skb) {
785 if ((n = emac_rx_clean(dev)) != fep->rx_slot) 801 dev_kfree_skb(dev->rx_sg_skb);
786 emac_rx_fill(dev, n); 802 dev->rx_sg_skb = NULL;
787 spin_unlock_irqrestore(&fep->lock, flags); 803 }
788} 804}
789 805
790/* 806static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
791 * This interrupt should never occurr, we don't program 807 int flags)
792 * the MAL for contiunous mode.
793 */
794static void emac_txde_dev(void *param, u32 chanmask)
795{ 808{
796 struct net_device *dev = param; 809 struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
797 struct ocp_enet_private *fep = dev->priv; 810 if (unlikely(!skb))
811 return -ENOMEM;
798 812
799 printk(KERN_WARNING "%s: transmit descriptor error\n", dev->name); 813 dev->rx_skb[slot] = skb;
814 dev->rx_desc[slot].data_len = 0;
800 815
801 emac_mac_dump(dev); 816 skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
802 emac_mal_dump(dev); 817 dev->rx_desc[slot].data_ptr =
818 dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
819 DMA_FROM_DEVICE) + 2;
820 barrier();
821 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
822 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
803 823
804 /* Reenable the transmit channel */ 824 return 0;
805 mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
806} 825}
807 826
808/* 827static void emac_print_link_status(struct ocp_enet_private *dev)
809 * This interrupt should be very rare at best. This occurs when
810 * the hardware has a problem with the receive descriptors. The manual
811 * states that it occurs when the hardware cannot the receive descriptor
812 * empty bit is not set. The recovery mechanism will be to
813 * traverse through the descriptors, handle any that are marked to be
814 * handled and reinitialize each along the way. At that point the driver
815 * will be restarted.
816 */
817static void emac_rxde_dev(void *param, u32 chanmask)
818{ 828{
819 struct net_device *dev = param; 829 if (netif_carrier_ok(dev->ndev))
820 struct ocp_enet_private *fep = dev->priv; 830 printk(KERN_INFO "%s: link is up, %d %s%s\n",
821 unsigned long flags; 831 dev->ndev->name, dev->phy.speed,
822 832 dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
823 if (net_ratelimit()) { 833 dev->phy.pause ? ", pause enabled" :
824 printk(KERN_WARNING "%s: receive descriptor error\n", 834 dev->phy.asym_pause ? ", assymetric pause enabled" : "");
825 fep->ndev->name); 835 else
836 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
837}
826 838
827 emac_mac_dump(dev); 839/* Process ctx, rtnl_lock semaphore */
828 emac_mal_dump(dev); 840static int emac_open(struct net_device *ndev)
829 emac_desc_dump(dev); 841{
842 struct ocp_enet_private *dev = ndev->priv;
843 struct ocp_func_emac_data *emacdata = dev->def->additions;
844 int err, i;
845
846 DBG("%d: open" NL, dev->def->index);
847
848 /* Setup error IRQ handler */
849 err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
850 if (err) {
851 printk(KERN_ERR "%s: failed to request IRQ %d\n",
852 ndev->name, dev->def->irq);
853 return err;
830 } 854 }
831 855
832 /* Disable RX channel */ 856 /* Allocate RX ring */
833 spin_lock_irqsave(&fep->lock, flags); 857 for (i = 0; i < NUM_RX_BUFF; ++i)
834 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 858 if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
835 859 printk(KERN_ERR "%s: failed to allocate RX ring\n",
836 /* For now, charge the error against all emacs */ 860 ndev->name);
837 fep->stats.rx_errors++; 861 goto oom;
838 862 }
839 /* so do we have any good packets still? */
840 emac_rx_clean(dev);
841
842 /* When the interface is restarted it resets processing to the
843 * first descriptor in the table.
844 */
845
846 fep->rx_slot = 0;
847 emac_rx_fill(dev, 0);
848 863
849 set_mal_dcrn(fep->mal, DCRN_MALRXEOBISR, fep->commac.rx_chan_mask); 864 local_bh_disable();
850 set_mal_dcrn(fep->mal, DCRN_MALRXDEIR, fep->commac.rx_chan_mask); 865 dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
866 dev->commac.rx_stopped = 0;
867 dev->rx_sg_skb = NULL;
868
869 if (dev->phy.address >= 0) {
870 int link_poll_interval;
871 if (dev->phy.def->ops->poll_link(&dev->phy)) {
872 dev->phy.def->ops->read_link(&dev->phy);
873 EMAC_RX_CLK_DEFAULT(dev->def->index);
874 netif_carrier_on(dev->ndev);
875 link_poll_interval = PHY_POLL_LINK_ON;
876 } else {
877 EMAC_RX_CLK_TX(dev->def->index);
878 netif_carrier_off(dev->ndev);
879 link_poll_interval = PHY_POLL_LINK_OFF;
880 }
881 mod_timer(&dev->link_timer, jiffies + link_poll_interval);
882 emac_print_link_status(dev);
883 } else
884 netif_carrier_on(dev->ndev);
885
886 emac_configure(dev);
887 mal_poll_add(dev->mal, &dev->commac);
888 mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
889 mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
890 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
891 emac_tx_enable(dev);
892 emac_rx_enable(dev);
893 netif_start_queue(ndev);
894 local_bh_enable();
851 895
852 /* Reenable the receive channels */ 896 return 0;
853 mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 897 oom:
854 spin_unlock_irqrestore(&fep->lock, flags); 898 emac_clean_rx_ring(dev);
899 free_irq(dev->def->irq, dev);
900 return -ENOMEM;
855} 901}
856 902
857static irqreturn_t 903/* BHs disabled */
858emac_mac_irq(int irq, void *dev_instance, struct pt_regs *regs) 904static int emac_link_differs(struct ocp_enet_private *dev)
859{ 905{
860 struct net_device *dev = dev_instance; 906 u32 r = in_be32(&dev->emacp->mr1);
861 struct ocp_enet_private *fep = dev->priv;
862 emac_t *emacp = fep->emacp;
863 unsigned long tmp_em0isr;
864 907
865 /* EMAC interrupt */ 908 int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
866 tmp_em0isr = in_be32(&emacp->em0isr); 909 int speed, pause, asym_pause;
867 if (tmp_em0isr & (EMAC_ISR_TE0 | EMAC_ISR_TE1)) {
868 /* This error is a hard transmit error - could retransmit */
869 fep->stats.tx_errors++;
870 910
871 /* Reenable the transmit channel */ 911 if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
872 mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask); 912 speed = SPEED_1000;
913 else if (r & EMAC_MR1_MF_100)
914 speed = SPEED_100;
915 else
916 speed = SPEED_10;
873 917
874 } else { 918 switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
875 fep->stats.rx_errors++; 919 case (EMAC_MR1_EIFC | EMAC_MR1_APP):
920 pause = 1;
921 asym_pause = 0;
922 break;
923 case EMAC_MR1_APP:
924 pause = 0;
925 asym_pause = 1;
926 break;
927 default:
928 pause = asym_pause = 0;
876 } 929 }
877 930 return speed != dev->phy.speed || duplex != dev->phy.duplex ||
878 if (tmp_em0isr & EMAC_ISR_RP) 931 pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
879 fep->stats.rx_length_errors++;
880 if (tmp_em0isr & EMAC_ISR_ALE)
881 fep->stats.rx_frame_errors++;
882 if (tmp_em0isr & EMAC_ISR_BFCS)
883 fep->stats.rx_crc_errors++;
884 if (tmp_em0isr & EMAC_ISR_PTLE)
885 fep->stats.rx_length_errors++;
886 if (tmp_em0isr & EMAC_ISR_ORE)
887 fep->stats.rx_length_errors++;
888 if (tmp_em0isr & EMAC_ISR_TE0)
889 fep->stats.tx_aborted_errors++;
890
891 emac_err_dump(dev, tmp_em0isr);
892
893 out_be32(&emacp->em0isr, tmp_em0isr);
894
895 return IRQ_HANDLED;
896} 932}
897 933
898static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) 934/* BHs disabled */
935static void emac_link_timer(unsigned long data)
899{ 936{
900 unsigned short ctrl; 937 struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
901 unsigned long flags; 938 int link_poll_interval;
902 struct ocp_enet_private *fep = dev->priv;
903 emac_t *emacp = fep->emacp;
904 int len = skb->len;
905 unsigned int offset = 0, size, f, tx_slot_first;
906 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
907 939
908 spin_lock_irqsave(&fep->lock, flags); 940 DBG2("%d: link timer" NL, dev->def->index);
909 941
910 len -= skb->data_len; 942 if (dev->phy.def->ops->poll_link(&dev->phy)) {
943 if (!netif_carrier_ok(dev->ndev)) {
944 EMAC_RX_CLK_DEFAULT(dev->def->index);
911 945
912 if ((fep->tx_cnt + nr_frags + len / DESC_BUF_SIZE + 1) > NUM_TX_BUFF) { 946 /* Get new link parameters */
913 PKT_DEBUG(("emac_start_xmit() stopping queue\n")); 947 dev->phy.def->ops->read_link(&dev->phy);
914 netif_stop_queue(dev);
915 spin_unlock_irqrestore(&fep->lock, flags);
916 return -EBUSY;
917 }
918 948
919 tx_slot_first = fep->tx_slot; 949 if (dev->tah_dev || emac_link_differs(dev))
950 emac_full_tx_reset(dev->ndev);
920 951
921 while (len) { 952 netif_carrier_on(dev->ndev);
922 size = min(len, DESC_BUF_SIZE); 953 emac_print_link_status(dev);
923 954 }
924 fep->tx_desc[fep->tx_slot].data_len = (short)size; 955 link_poll_interval = PHY_POLL_LINK_ON;
925 fep->tx_desc[fep->tx_slot].data_ptr = 956 } else {
926 (unsigned char *)dma_map_single(&fep->ocpdev->dev, 957 if (netif_carrier_ok(dev->ndev)) {
927 (void *)((unsigned int)skb-> 958 EMAC_RX_CLK_TX(dev->def->index);
928 data + offset), 959#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
929 size, DMA_TO_DEVICE); 960 emac_reinitialize(dev);
930 961#endif
931 ctrl = EMAC_TX_CTRL_DFLT; 962 netif_carrier_off(dev->ndev);
932 if (fep->tx_slot != tx_slot_first) 963 emac_print_link_status(dev);
933 ctrl |= MAL_TX_CTRL_READY;
934 if ((NUM_TX_BUFF - 1) == fep->tx_slot)
935 ctrl |= MAL_TX_CTRL_WRAP;
936 if (!nr_frags && (len == size)) {
937 ctrl |= MAL_TX_CTRL_LAST;
938 fep->tx_skb[fep->tx_slot] = skb;
939 } 964 }
940 if (skb->ip_summed == CHECKSUM_HW)
941 ctrl |= EMAC_TX_CTRL_TAH_CSUM;
942 965
943 fep->tx_desc[fep->tx_slot].ctrl = ctrl; 966 /* Retry reset if the previous attempt failed.
967 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
968 * case, but I left it here because it shouldn't trigger for
969 * sane PHYs anyway.
970 */
971 if (unlikely(dev->reset_failed))
972 emac_reinitialize(dev);
944 973
945 len -= size; 974 link_poll_interval = PHY_POLL_LINK_OFF;
946 offset += size; 975 }
976 mod_timer(&dev->link_timer, jiffies + link_poll_interval);
977}
947 978
948 /* Bump tx count */ 979/* BHs disabled */
949 if (++fep->tx_cnt == NUM_TX_BUFF) 980static void emac_force_link_update(struct ocp_enet_private *dev)
950 netif_stop_queue(dev); 981{
982 netif_carrier_off(dev->ndev);
983 if (timer_pending(&dev->link_timer))
984 mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
985}
951 986
952 /* Next descriptor */ 987/* Process ctx, rtnl_lock semaphore */
953 if (++fep->tx_slot == NUM_TX_BUFF) 988static int emac_close(struct net_device *ndev)
954 fep->tx_slot = 0; 989{
955 } 990 struct ocp_enet_private *dev = ndev->priv;
991 struct ocp_func_emac_data *emacdata = dev->def->additions;
956 992
957 for (f = 0; f < nr_frags; f++) { 993 DBG("%d: close" NL, dev->def->index);
958 struct skb_frag_struct *frag;
959 994
960 frag = &skb_shinfo(skb)->frags[f]; 995 local_bh_disable();
961 len = frag->size;
962 offset = 0;
963
964 while (len) {
965 size = min(len, DESC_BUF_SIZE);
966
967 dma_map_page(&fep->ocpdev->dev,
968 frag->page,
969 frag->page_offset + offset,
970 size, DMA_TO_DEVICE);
971
972 ctrl = EMAC_TX_CTRL_DFLT | MAL_TX_CTRL_READY;
973 if ((NUM_TX_BUFF - 1) == fep->tx_slot)
974 ctrl |= MAL_TX_CTRL_WRAP;
975 if ((f == (nr_frags - 1)) && (len == size)) {
976 ctrl |= MAL_TX_CTRL_LAST;
977 fep->tx_skb[fep->tx_slot] = skb;
978 }
979 996
980 if (skb->ip_summed == CHECKSUM_HW) 997 if (dev->phy.address >= 0)
981 ctrl |= EMAC_TX_CTRL_TAH_CSUM; 998 del_timer_sync(&dev->link_timer);
982 999
983 fep->tx_desc[fep->tx_slot].data_len = (short)size; 1000 netif_stop_queue(ndev);
984 fep->tx_desc[fep->tx_slot].data_ptr = 1001 emac_rx_disable(dev);
985 (char *)((page_to_pfn(frag->page) << PAGE_SHIFT) + 1002 emac_tx_disable(dev);
986 frag->page_offset + offset); 1003 mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
987 fep->tx_desc[fep->tx_slot].ctrl = ctrl; 1004 mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
1005 mal_poll_del(dev->mal, &dev->commac);
1006 local_bh_enable();
988 1007
989 len -= size; 1008 emac_clean_tx_ring(dev);
990 offset += size; 1009 emac_clean_rx_ring(dev);
1010 free_irq(dev->def->irq, dev);
991 1011
992 /* Bump tx count */ 1012 return 0;
993 if (++fep->tx_cnt == NUM_TX_BUFF) 1013}
994 netif_stop_queue(dev);
995 1014
996 /* Next descriptor */ 1015static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
997 if (++fep->tx_slot == NUM_TX_BUFF) 1016 struct sk_buff *skb)
998 fep->tx_slot = 0; 1017{
999 } 1018#if defined(CONFIG_IBM_EMAC_TAH)
1019 if (skb->ip_summed == CHECKSUM_HW) {
1020 ++dev->stats.tx_packets_csum;
1021 return EMAC_TX_CTRL_TAH_CSUM;
1000 } 1022 }
1023#endif
1024 return 0;
1025}
1001 1026
1002 /* 1027static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
1003 * Deferred set READY on first descriptor of packet to 1028{
1004 * avoid TX MAL race. 1029 struct emac_regs *p = dev->emacp;
1005 */ 1030 struct net_device *ndev = dev->ndev;
1006 fep->tx_desc[tx_slot_first].ctrl |= MAL_TX_CTRL_READY;
1007
1008 /* Send the packet out. */
1009 out_be32(&emacp->em0tmr0, EMAC_TMR0_XMIT);
1010 1031
1011 fep->stats.tx_packets++; 1032 /* Send the packet out */
1012 fep->stats.tx_bytes += skb->len; 1033 out_be32(&p->tmr0, EMAC_TMR0_XMIT);
1013 1034
1014 PKT_DEBUG(("emac_start_xmit() exitn")); 1035 if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
1036 netif_stop_queue(ndev);
1037 DBG2("%d: stopped TX queue" NL, dev->def->index);
1038 }
1015 1039
1016 spin_unlock_irqrestore(&fep->lock, flags); 1040 ndev->trans_start = jiffies;
1041 ++dev->stats.tx_packets;
1042 dev->stats.tx_bytes += len;
1017 1043
1018 return 0; 1044 return 0;
1019} 1045}
1020 1046
1021static int emac_adjust_to_link(struct ocp_enet_private *fep) 1047/* BHs disabled */
1048static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1022{ 1049{
1023 emac_t *emacp = fep->emacp; 1050 struct ocp_enet_private *dev = ndev->priv;
1024 unsigned long mode_reg; 1051 unsigned int len = skb->len;
1025 int full_duplex, speed; 1052 int slot;
1026 1053
1027 full_duplex = 0; 1054 u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1028 speed = SPEED_10; 1055 MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
1029 1056
1030 /* set mode register 1 defaults */ 1057 slot = dev->tx_slot++;
1031 mode_reg = EMAC_M1_DEFAULT; 1058 if (dev->tx_slot == NUM_TX_BUFF) {
1032 1059 dev->tx_slot = 0;
1033 /* Read link mode on PHY */ 1060 ctrl |= MAL_TX_CTRL_WRAP;
1034 if (fep->phy_mii.def->ops->read_link(&fep->phy_mii) == 0) {
1035 /* If an error occurred, we don't deal with it yet */
1036 full_duplex = (fep->phy_mii.duplex == DUPLEX_FULL);
1037 speed = fep->phy_mii.speed;
1038 } 1061 }
1039 1062
1063 DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);
1040 1064
1041 /* set speed (default is 10Mb) */ 1065 dev->tx_skb[slot] = skb;
1042 switch (speed) { 1066 dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
1043 case SPEED_1000: 1067 DMA_TO_DEVICE);
1044 mode_reg |= EMAC_M1_RFS_16K; 1068 dev->tx_desc[slot].data_len = (u16) len;
1045 if (fep->rgmii_dev) { 1069 barrier();
1046 struct ibm_ocp_rgmii *rgmii = RGMII_PRIV(fep->rgmii_dev); 1070 dev->tx_desc[slot].ctrl = ctrl;
1047
1048 if ((rgmii->mode[fep->rgmii_input] == RTBI)
1049 || (rgmii->mode[fep->rgmii_input] == TBI))
1050 mode_reg |= EMAC_M1_MF_1000GPCS;
1051 else
1052 mode_reg |= EMAC_M1_MF_1000MBPS;
1053
1054 emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
1055 1000);
1056 }
1057 break;
1058 case SPEED_100:
1059 mode_reg |= EMAC_M1_MF_100MBPS | EMAC_M1_RFS_4K;
1060 if (fep->rgmii_dev)
1061 emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
1062 100);
1063 if (fep->zmii_dev)
1064 emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
1065 100);
1066 break;
1067 case SPEED_10:
1068 default:
1069 mode_reg = (mode_reg & ~EMAC_M1_MF_100MBPS) | EMAC_M1_RFS_4K;
1070 if (fep->rgmii_dev)
1071 emac_rgmii_port_speed(fep->rgmii_dev, fep->rgmii_input,
1072 10);
1073 if (fep->zmii_dev)
1074 emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input,
1075 10);
1076 }
1077
1078 if (full_duplex)
1079 mode_reg |= EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_IST;
1080 else
1081 mode_reg &= ~(EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_ILE);
1082 1071
1083 LINK_DEBUG(("%s: adjust to link, speed: %d, duplex: %d, opened: %d\n", 1072 return emac_xmit_finish(dev, len);
1084 fep->ndev->name, speed, full_duplex, fep->opened));
1085
1086 printk(KERN_INFO "%s: Speed: %d, %s duplex.\n",
1087 fep->ndev->name, speed, full_duplex ? "Full" : "Half");
1088 if (fep->opened)
1089 out_be32(&emacp->em0mr1, mode_reg);
1090
1091 return 0;
1092} 1073}
1093 1074
1094static int emac_set_mac_address(struct net_device *ndev, void *p) 1075#if defined(CONFIG_IBM_EMAC_TAH)
1076static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
1077 u32 pd, int len, int last, u16 base_ctrl)
1095{ 1078{
1096 struct ocp_enet_private *fep = ndev->priv; 1079 while (1) {
1097 emac_t *emacp = fep->emacp; 1080 u16 ctrl = base_ctrl;
1098 struct sockaddr *addr = p; 1081 int chunk = min(len, MAL_MAX_TX_SIZE);
1082 len -= chunk;
1099 1083
1100 if (!is_valid_ether_addr(addr->sa_data)) 1084 slot = (slot + 1) % NUM_TX_BUFF;
1101 return -EADDRNOTAVAIL;
1102 1085
1103 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 1086 if (last && !len)
1087 ctrl |= MAL_TX_CTRL_LAST;
1088 if (slot == NUM_TX_BUFF - 1)
1089 ctrl |= MAL_TX_CTRL_WRAP;
1104 1090
1105 /* set the high address */ 1091 dev->tx_skb[slot] = NULL;
1106 out_be32(&emacp->em0iahr, 1092 dev->tx_desc[slot].data_ptr = pd;
1107 (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]); 1093 dev->tx_desc[slot].data_len = (u16) chunk;
1094 dev->tx_desc[slot].ctrl = ctrl;
1095 ++dev->tx_cnt;
1108 1096
1109 /* set the low address */ 1097 if (!len)
1110 out_be32(&emacp->em0ialr, 1098 break;
1111 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
1112 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
1113 1099
1114 return 0; 1100 pd += chunk;
1101 }
1102 return slot;
1115} 1103}
1116 1104
1117static int emac_change_mtu(struct net_device *dev, int new_mtu) 1105/* BHs disabled (SG version for TAH equipped EMACs) */
1106static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
1118{ 1107{
1119 struct ocp_enet_private *fep = dev->priv; 1108 struct ocp_enet_private *dev = ndev->priv;
1120 int old_mtu = dev->mtu; 1109 int nr_frags = skb_shinfo(skb)->nr_frags;
1121 unsigned long mode_reg; 1110 int len = skb->len, chunk;
1122 emac_t *emacp = fep->emacp; 1111 int slot, i;
1123 u32 em0mr0; 1112 u16 ctrl;
1124 int i, full; 1113 u32 pd;
1125 unsigned long flags;
1126 1114
1127 if ((new_mtu < EMAC_MIN_MTU) || (new_mtu > EMAC_MAX_MTU)) { 1115 /* This is common "fast" path */
1128 printk(KERN_ERR 1116 if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
1129 "emac: Invalid MTU setting, MTU must be between %d and %d\n", 1117 return emac_start_xmit(skb, ndev);
1130 EMAC_MIN_MTU, EMAC_MAX_MTU);
1131 return -EINVAL;
1132 }
1133 1118
1134 if (old_mtu != new_mtu && netif_running(dev)) { 1119 len -= skb->data_len;
1135 /* Stop rx engine */
1136 em0mr0 = in_be32(&emacp->em0mr0);
1137 out_be32(&emacp->em0mr0, em0mr0 & ~EMAC_M0_RXE);
1138
1139 /* Wait for descriptors to be empty */
1140 do {
1141 full = 0;
1142 for (i = 0; i < NUM_RX_BUFF; i++)
1143 if (!(fep->rx_desc[i].ctrl & MAL_RX_CTRL_EMPTY)) {
1144 printk(KERN_NOTICE
1145 "emac: RX ring is still full\n");
1146 full = 1;
1147 }
1148 } while (full);
1149
1150 spin_lock_irqsave(&fep->lock, flags);
1151
1152 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
1153
1154 /* Destroy all old rx skbs */
1155 for (i = 0; i < NUM_RX_BUFF; i++) {
1156 dma_unmap_single(&fep->ocpdev->dev,
1157 fep->rx_desc[i].data_ptr,
1158 fep->rx_desc[i].data_len,
1159 DMA_FROM_DEVICE);
1160 dev_kfree_skb(fep->rx_skb[i]);
1161 fep->rx_skb[i] = NULL;
1162 }
1163 1120
1164 /* Set new rx_buffer_size, jumbo cap, and advertise new mtu */ 1121 /* Note, this is only an *estimation*, we can still run out of empty
1165 mode_reg = in_be32(&emacp->em0mr1); 1122 * slots because of the additional fragmentation into
1166 if (new_mtu > ENET_DEF_MTU_SIZE) { 1123 * MAL_MAX_TX_SIZE-sized chunks
1167 mode_reg |= EMAC_M1_JUMBO_ENABLE; 1124 */
1168 fep->rx_buffer_size = EMAC_MAX_FRAME; 1125 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1169 } else { 1126 goto stop_queue;
1170 mode_reg &= ~EMAC_M1_JUMBO_ENABLE; 1127
1171 fep->rx_buffer_size = ENET_DEF_BUF_SIZE; 1128 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1172 } 1129 emac_tx_csum(dev, skb);
1173 dev->mtu = new_mtu; 1130 slot = dev->tx_slot;
1174 out_be32(&emacp->em0mr1, mode_reg); 1131
1132 /* skb data */
1133 dev->tx_skb[slot] = NULL;
1134 chunk = min(len, MAL_MAX_TX_SIZE);
1135 dev->tx_desc[slot].data_ptr = pd =
1136 dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
1137 dev->tx_desc[slot].data_len = (u16) chunk;
1138 len -= chunk;
1139 if (unlikely(len))
1140 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1141 ctrl);
1142 /* skb fragments */
1143 for (i = 0; i < nr_frags; ++i) {
1144 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1145 len = frag->size;
1175 1146
1176 /* Re-init rx skbs */ 1147 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1177 fep->rx_slot = 0; 1148 goto undo_frame;
1178 emac_rx_fill(dev, 0);
1179 1149
1180 /* Restart the rx engine */ 1150 pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
1181 mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 1151 DMA_TO_DEVICE);
1182 out_be32(&emacp->em0mr0, em0mr0 | EMAC_M0_RXE);
1183 1152
1184 spin_unlock_irqrestore(&fep->lock, flags); 1153 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1154 ctrl);
1185 } 1155 }
1186 1156
1187 return 0; 1157 DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
1188} 1158 dev->tx_slot, slot);
1189 1159
1190static void __emac_set_multicast_list(struct net_device *dev) 1160 /* Attach skb to the last slot so we don't release it too early */
1191{ 1161 dev->tx_skb[slot] = skb;
1192 struct ocp_enet_private *fep = dev->priv;
1193 emac_t *emacp = fep->emacp;
1194 u32 rmr = in_be32(&emacp->em0rmr);
1195 1162
1196 /* First clear all special bits, they can be set later */ 1163 /* Send the packet out */
1197 rmr &= ~(EMAC_RMR_PME | EMAC_RMR_PMME | EMAC_RMR_MAE); 1164 if (dev->tx_slot == NUM_TX_BUFF - 1)
1165 ctrl |= MAL_TX_CTRL_WRAP;
1166 barrier();
1167 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1168 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1198 1169
1199 if (dev->flags & IFF_PROMISC) { 1170 return emac_xmit_finish(dev, skb->len);
1200 rmr |= EMAC_RMR_PME;
1201 } else if (dev->flags & IFF_ALLMULTI || 32 < dev->mc_count) {
1202 /*
1203 * Must be setting up to use multicast
1204 * Now check for promiscuous multicast
1205 */
1206 rmr |= EMAC_RMR_PMME;
1207 } else if (dev->flags & IFF_MULTICAST && 0 < dev->mc_count) {
1208 unsigned short em0gaht[4] = { 0, 0, 0, 0 };
1209 struct dev_mc_list *dmi;
1210
1211 /* Need to hash on the multicast address. */
1212 for (dmi = dev->mc_list; dmi; dmi = dmi->next) {
1213 unsigned long mc_crc;
1214 unsigned int bit_number;
1215
1216 mc_crc = ether_crc(6, (char *)dmi->dmi_addr);
1217 bit_number = 63 - (mc_crc >> 26); /* MSB: 0 LSB: 63 */
1218 em0gaht[bit_number >> 4] |=
1219 0x8000 >> (bit_number & 0x0f);
1220 }
1221 emacp->em0gaht1 = em0gaht[0];
1222 emacp->em0gaht2 = em0gaht[1];
1223 emacp->em0gaht3 = em0gaht[2];
1224 emacp->em0gaht4 = em0gaht[3];
1225 1171
1226 /* Turn on multicast addressing */ 1172 undo_frame:
1227 rmr |= EMAC_RMR_MAE; 1173 /* Well, too bad. Our previous estimation was overly optimistic.
1174 * Undo everything.
1175 */
1176 while (slot != dev->tx_slot) {
1177 dev->tx_desc[slot].ctrl = 0;
1178 --dev->tx_cnt;
1179 if (--slot < 0)
1180 slot = NUM_TX_BUFF - 1;
1228 } 1181 }
1229 out_be32(&emacp->em0rmr, rmr); 1182 ++dev->estats.tx_undo;
1183
1184 stop_queue:
1185 netif_stop_queue(ndev);
1186 DBG2("%d: stopped TX queue" NL, dev->def->index);
1187 return 1;
1230} 1188}
1189#else
1190# define emac_start_xmit_sg emac_start_xmit
1191#endif /* !defined(CONFIG_IBM_EMAC_TAH) */
1231 1192
1232static int emac_init_tah(struct ocp_enet_private *fep) 1193/* BHs disabled */
1194static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
1233{ 1195{
1234 tah_t *tahp; 1196 struct ibm_emac_error_stats *st = &dev->estats;
1197 DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
1198
1199 ++st->tx_bd_errors;
1200 if (ctrl & EMAC_TX_ST_BFCS)
1201 ++st->tx_bd_bad_fcs;
1202 if (ctrl & EMAC_TX_ST_LCS)
1203 ++st->tx_bd_carrier_loss;
1204 if (ctrl & EMAC_TX_ST_ED)
1205 ++st->tx_bd_excessive_deferral;
1206 if (ctrl & EMAC_TX_ST_EC)
1207 ++st->tx_bd_excessive_collisions;
1208 if (ctrl & EMAC_TX_ST_LC)
1209 ++st->tx_bd_late_collision;
1210 if (ctrl & EMAC_TX_ST_MC)
1211 ++st->tx_bd_multple_collisions;
1212 if (ctrl & EMAC_TX_ST_SC)
1213 ++st->tx_bd_single_collision;
1214 if (ctrl & EMAC_TX_ST_UR)
1215 ++st->tx_bd_underrun;
1216 if (ctrl & EMAC_TX_ST_SQE)
1217 ++st->tx_bd_sqe;
1218}
1235 1219
1236 /* Initialize TAH and enable checksum verification */ 1220static void emac_poll_tx(void *param)
1237 tahp = (tah_t *) ioremap(fep->tah_dev->def->paddr, sizeof(*tahp)); 1221{
1222 struct ocp_enet_private *dev = param;
1223 DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
1224 dev->ack_slot);
1225
1226 if (dev->tx_cnt) {
1227 u16 ctrl;
1228 int slot = dev->ack_slot, n = 0;
1229 again:
1230 ctrl = dev->tx_desc[slot].ctrl;
1231 if (!(ctrl & MAL_TX_CTRL_READY)) {
1232 struct sk_buff *skb = dev->tx_skb[slot];
1233 ++n;
1234
1235 if (skb) {
1236 dev_kfree_skb(skb);
1237 dev->tx_skb[slot] = NULL;
1238 }
1239 slot = (slot + 1) % NUM_TX_BUFF;
1238 1240
1239 if (tahp == NULL) { 1241 if (unlikely(EMAC_IS_BAD_TX(ctrl)))
1240 printk(KERN_ERR "tah%d: Cannot ioremap TAH registers!\n", 1242 emac_parse_tx_error(dev, ctrl);
1241 fep->tah_dev->def->index);
1242 1243
1243 return -ENOMEM; 1244 if (--dev->tx_cnt)
1244 } 1245 goto again;
1245 1246 }
1246 out_be32(&tahp->tah_mr, TAH_MR_SR); 1247 if (n) {
1248 dev->ack_slot = slot;
1249 if (netif_queue_stopped(dev->ndev) &&
1250 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1251 netif_wake_queue(dev->ndev);
1247 1252
1248 /* wait for reset to complete */ 1253 DBG2("%d: tx %d pkts" NL, dev->def->index, n);
1249 while (in_be32(&tahp->tah_mr) & TAH_MR_SR) ; 1254 }
1255 }
1256}
1250 1257
1251 /* 10KB TAH TX FIFO accomodates the max MTU of 9000 */ 1258static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
1252 out_be32(&tahp->tah_mr, 1259 int len)
1253 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP | 1260{
1254 TAH_MR_DIG); 1261 struct sk_buff *skb = dev->rx_skb[slot];
1262 DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);
1255 1263
1256 iounmap(tahp); 1264 if (len)
1265 dma_map_single(dev->ldev, skb->data - 2,
1266 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1257 1267
1258 return 0; 1268 dev->rx_desc[slot].data_len = 0;
1269 barrier();
1270 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1271 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1259} 1272}
1260 1273
1261static void emac_init_rings(struct net_device *dev) 1274static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
1262{ 1275{
1263 struct ocp_enet_private *ep = dev->priv; 1276 struct ibm_emac_error_stats *st = &dev->estats;
1264 int loop; 1277 DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
1278
1279 ++st->rx_bd_errors;
1280 if (ctrl & EMAC_RX_ST_OE)
1281 ++st->rx_bd_overrun;
1282 if (ctrl & EMAC_RX_ST_BP)
1283 ++st->rx_bd_bad_packet;
1284 if (ctrl & EMAC_RX_ST_RP)
1285 ++st->rx_bd_runt_packet;
1286 if (ctrl & EMAC_RX_ST_SE)
1287 ++st->rx_bd_short_event;
1288 if (ctrl & EMAC_RX_ST_AE)
1289 ++st->rx_bd_alignment_error;
1290 if (ctrl & EMAC_RX_ST_BFCS)
1291 ++st->rx_bd_bad_fcs;
1292 if (ctrl & EMAC_RX_ST_PTL)
1293 ++st->rx_bd_packet_too_long;
1294 if (ctrl & EMAC_RX_ST_ORE)
1295 ++st->rx_bd_out_of_range;
1296 if (ctrl & EMAC_RX_ST_IRE)
1297 ++st->rx_bd_in_range;
1298}
1265 1299
1266 ep->tx_desc = (struct mal_descriptor *)((char *)ep->mal->tx_virt_addr + 1300static inline void emac_rx_csum(struct ocp_enet_private *dev,
1267 (ep->mal_tx_chan * 1301 struct sk_buff *skb, u16 ctrl)
1268 MAL_DT_ALIGN)); 1302{
1269 ep->rx_desc = 1303#if defined(CONFIG_IBM_EMAC_TAH)
1270 (struct mal_descriptor *)((char *)ep->mal->rx_virt_addr + 1304 if (!ctrl && dev->tah_dev) {
1271 (ep->mal_rx_chan * MAL_DT_ALIGN)); 1305 skb->ip_summed = CHECKSUM_UNNECESSARY;
1306 ++dev->stats.rx_packets_csum;
1307 }
1308#endif
1309}
1272 1310
1273 /* Fill in the transmit descriptor ring. */ 1311static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
1274 for (loop = 0; loop < NUM_TX_BUFF; loop++) { 1312{
1275 if (ep->tx_skb[loop]) { 1313 if (likely(dev->rx_sg_skb != NULL)) {
1276 dma_unmap_single(&ep->ocpdev->dev, 1314 int len = dev->rx_desc[slot].data_len;
1277 ep->tx_desc[loop].data_ptr, 1315 int tot_len = dev->rx_sg_skb->len + len;
1278 ep->tx_desc[loop].data_len, 1316
1279 DMA_TO_DEVICE); 1317 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1280 dev_kfree_skb_irq(ep->tx_skb[loop]); 1318 ++dev->estats.rx_dropped_mtu;
1319 dev_kfree_skb(dev->rx_sg_skb);
1320 dev->rx_sg_skb = NULL;
1321 } else {
1322 cacheable_memcpy(dev->rx_sg_skb->tail,
1323 dev->rx_skb[slot]->data, len);
1324 skb_put(dev->rx_sg_skb, len);
1325 emac_recycle_rx_skb(dev, slot, len);
1326 return 0;
1281 } 1327 }
1282 ep->tx_skb[loop] = NULL;
1283 ep->tx_desc[loop].ctrl = 0;
1284 ep->tx_desc[loop].data_len = 0;
1285 ep->tx_desc[loop].data_ptr = NULL;
1286 }
1287 ep->tx_desc[loop - 1].ctrl |= MAL_TX_CTRL_WRAP;
1288
1289 /* Format the receive descriptor ring. */
1290 ep->rx_slot = 0;
1291 /* Default is MTU=1500 + Ethernet overhead */
1292 ep->rx_buffer_size = dev->mtu + ENET_HEADER_SIZE + ENET_FCS_SIZE;
1293 emac_rx_fill(dev, 0);
1294 if (ep->rx_slot != 0) {
1295 printk(KERN_ERR
1296 "%s: Not enough mem for RxChain durning Open?\n",
1297 dev->name);
1298 /*We couldn't fill the ring at startup?
1299 *We could clean up and fail to open but right now we will try to
1300 *carry on. It may be a sign of a bad NUM_RX_BUFF value
1301 */
1302 } 1328 }
1303 1329 emac_recycle_rx_skb(dev, slot, 0);
1304 ep->tx_cnt = 0; 1330 return -1;
1305 ep->tx_slot = 0;
1306 ep->ack_slot = 0;
1307} 1331}
1308 1332
1309static void emac_reset_configure(struct ocp_enet_private *fep) 1333/* BHs disabled */
1334static int emac_poll_rx(void *param, int budget)
1310{ 1335{
1311 emac_t *emacp = fep->emacp; 1336 struct ocp_enet_private *dev = param;
1312 int i; 1337 int slot = dev->rx_slot, received = 0;
1313
1314 mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
1315 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
1316 1338
1317 /* 1339 DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);
1318 * Check for a link, some PHYs don't provide a clock if
1319 * no link is present. Some EMACs will not come out of
1320 * soft reset without a PHY clock present.
1321 */
1322 if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
1323 /* Reset the EMAC */
1324 out_be32(&emacp->em0mr0, EMAC_M0_SRST);
1325 udelay(20);
1326 for (i = 0; i < 100; i++) {
1327 if ((in_be32(&emacp->em0mr0) & EMAC_M0_SRST) == 0)
1328 break;
1329 udelay(10);
1330 }
1331
1332 if (i >= 100) {
1333 printk(KERN_ERR "%s: Cannot reset EMAC\n",
1334 fep->ndev->name);
1335 return;
1336 }
1337 }
1338 1340
1339 /* Switch IRQs off for now */ 1341 again:
1340 out_be32(&emacp->em0iser, 0); 1342 while (budget > 0) {
1343 int len;
1344 struct sk_buff *skb;
1345 u16 ctrl = dev->rx_desc[slot].ctrl;
1341 1346
1342 /* Configure MAL rx channel */ 1347 if (ctrl & MAL_RX_CTRL_EMPTY)
1343 mal_set_rcbs(fep->mal, fep->mal_rx_chan, DESC_BUF_SIZE_REG); 1348 break;
1344 1349
1345 /* set the high address */ 1350 skb = dev->rx_skb[slot];
1346 out_be32(&emacp->em0iahr, 1351 barrier();
1347 (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]); 1352 len = dev->rx_desc[slot].data_len;
1348 1353
1349 /* set the low address */ 1354 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1350 out_be32(&emacp->em0ialr, 1355 goto sg;
1351 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
1352 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);
1353 1356
1354 /* Adjust to link */ 1357 ctrl &= EMAC_BAD_RX_MASK;
1355 if (netif_carrier_ok(fep->ndev)) 1358 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1356 emac_adjust_to_link(fep); 1359 emac_parse_rx_error(dev, ctrl);
1360 ++dev->estats.rx_dropped_error;
1361 emac_recycle_rx_skb(dev, slot, 0);
1362 len = 0;
1363 goto next;
1364 }
1357 1365
1358 /* enable broadcast/individual address and RX FIFO defaults */ 1366 if (len && len < EMAC_RX_COPY_THRESH) {
1359 out_be32(&emacp->em0rmr, EMAC_RMR_DEFAULT); 1367 struct sk_buff *copy_skb =
1368 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1369 if (unlikely(!copy_skb))
1370 goto oom;
1371
1372 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1373 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1374 len + 2);
1375 emac_recycle_rx_skb(dev, slot, len);
1376 skb = copy_skb;
1377 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1378 goto oom;
1379
1380 skb_put(skb, len);
1381 push_packet:
1382 skb->dev = dev->ndev;
1383 skb->protocol = eth_type_trans(skb, dev->ndev);
1384 emac_rx_csum(dev, skb, ctrl);
1385
1386 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1387 ++dev->estats.rx_dropped_stack;
1388 next:
1389 ++dev->stats.rx_packets;
1390 skip:
1391 dev->stats.rx_bytes += len;
1392 slot = (slot + 1) % NUM_RX_BUFF;
1393 --budget;
1394 ++received;
1395 continue;
1396 sg:
1397 if (ctrl & MAL_RX_CTRL_FIRST) {
1398 BUG_ON(dev->rx_sg_skb);
1399 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1400 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1401 ++dev->estats.rx_dropped_oom;
1402 emac_recycle_rx_skb(dev, slot, 0);
1403 } else {
1404 dev->rx_sg_skb = skb;
1405 skb_put(skb, len);
1406 }
1407 } else if (!emac_rx_sg_append(dev, slot) &&
1408 (ctrl & MAL_RX_CTRL_LAST)) {
1409
1410 skb = dev->rx_sg_skb;
1411 dev->rx_sg_skb = NULL;
1412
1413 ctrl &= EMAC_BAD_RX_MASK;
1414 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1415 emac_parse_rx_error(dev, ctrl);
1416 ++dev->estats.rx_dropped_error;
1417 dev_kfree_skb(skb);
1418 len = 0;
1419 } else
1420 goto push_packet;
1421 }
1422 goto skip;
1423 oom:
1424 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1425 /* Drop the packet and recycle skb */
1426 ++dev->estats.rx_dropped_oom;
1427 emac_recycle_rx_skb(dev, slot, 0);
1428 goto next;
1429 }
1360 1430
1361 /* set transmit request threshold register */ 1431 if (received) {
1362 out_be32(&emacp->em0trtr, EMAC_TRTR_DEFAULT); 1432 DBG2("%d: rx %d BDs" NL, dev->def->index, received);
1433 dev->rx_slot = slot;
1434 }
1363 1435
1364 /* Reconfigure multicast */ 1436 if (unlikely(budget && dev->commac.rx_stopped)) {
1365 __emac_set_multicast_list(fep->ndev); 1437 struct ocp_func_emac_data *emacdata = dev->def->additions;
1366 1438
1367 /* Set receiver/transmitter defaults */ 1439 barrier();
1368 out_be32(&emacp->em0rwmr, EMAC_RWMR_DEFAULT); 1440 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1369 out_be32(&emacp->em0tmr0, EMAC_TMR0_DEFAULT); 1441 DBG2("%d: rx restart" NL, dev->def->index);
1370 out_be32(&emacp->em0tmr1, EMAC_TMR1_DEFAULT); 1442 received = 0;
1443 goto again;
1444 }
1371 1445
1372 /* set frame gap */ 1446 if (dev->rx_sg_skb) {
1373 out_be32(&emacp->em0ipgvr, CONFIG_IBM_EMAC_FGAP); 1447 DBG2("%d: dropping partial rx packet" NL,
1374 1448 dev->def->index);
1375 /* set VLAN Tag Protocol Identifier */ 1449 ++dev->estats.rx_dropped_error;
1376 out_be32(&emacp->em0vtpid, 0x8100); 1450 dev_kfree_skb(dev->rx_sg_skb);
1451 dev->rx_sg_skb = NULL;
1452 }
1377 1453
1378 /* Init ring buffers */ 1454 dev->commac.rx_stopped = 0;
1379 emac_init_rings(fep->ndev); 1455 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
1456 emac_rx_enable(dev);
1457 dev->rx_slot = 0;
1458 }
1459 return received;
1380} 1460}
1381 1461
1382static void emac_kick(struct ocp_enet_private *fep) 1462/* BHs disabled */
1463static int emac_peek_rx(void *param)
1383{ 1464{
1384 emac_t *emacp = fep->emacp; 1465 struct ocp_enet_private *dev = param;
1385 unsigned long emac_ier; 1466 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1386 1467}
1387 emac_ier = EMAC_ISR_PP | EMAC_ISR_BP | EMAC_ISR_RP |
1388 EMAC_ISR_SE | EMAC_ISR_PTLE | EMAC_ISR_ALE |
1389 EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
1390 1468
1391 out_be32(&emacp->em0iser, emac_ier); 1469/* BHs disabled */
1470static int emac_peek_rx_sg(void *param)
1471{
1472 struct ocp_enet_private *dev = param;
1473 int slot = dev->rx_slot;
1474 while (1) {
1475 u16 ctrl = dev->rx_desc[slot].ctrl;
1476 if (ctrl & MAL_RX_CTRL_EMPTY)
1477 return 0;
1478 else if (ctrl & MAL_RX_CTRL_LAST)
1479 return 1;
1392 1480
1393 /* enable all MAL transmit and receive channels */ 1481 slot = (slot + 1) % NUM_RX_BUFF;
1394 mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
1395 mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
1396 1482
1397 /* set transmit and receive enable */ 1483 /* I'm just being paranoid here :) */
1398 out_be32(&emacp->em0mr0, EMAC_M0_TXE | EMAC_M0_RXE); 1484 if (unlikely(slot == dev->rx_slot))
1485 return 0;
1486 }
1399} 1487}
1400 1488
1401static void 1489/* Hard IRQ */
1402emac_start_link(struct ocp_enet_private *fep, struct ethtool_cmd *ep) 1490static void emac_rxde(void *param)
1403{ 1491{
1404 u32 advertise; 1492 struct ocp_enet_private *dev = param;
1405 int autoneg; 1493 ++dev->estats.rx_stopped;
1406 int forced_speed; 1494 emac_rx_disable_async(dev);
1407 int forced_duplex; 1495}
1408 1496
1409 /* Default advertise */ 1497/* Hard IRQ */
1410 advertise = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | 1498static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
1411 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | 1499{
1412 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full; 1500 struct ocp_enet_private *dev = dev_instance;
1413 autoneg = fep->want_autoneg; 1501 struct emac_regs *p = dev->emacp;
1414 forced_speed = fep->phy_mii.speed; 1502 struct ibm_emac_error_stats *st = &dev->estats;
1415 forced_duplex = fep->phy_mii.duplex; 1503
1504 u32 isr = in_be32(&p->isr);
1505 out_be32(&p->isr, isr);
1506
1507 DBG("%d: isr = %08x" NL, dev->def->index, isr);
1508
1509 if (isr & EMAC_ISR_TXPE)
1510 ++st->tx_parity;
1511 if (isr & EMAC_ISR_RXPE)
1512 ++st->rx_parity;
1513 if (isr & EMAC_ISR_TXUE)
1514 ++st->tx_underrun;
1515 if (isr & EMAC_ISR_RXOE)
1516 ++st->rx_fifo_overrun;
1517 if (isr & EMAC_ISR_OVR)
1518 ++st->rx_overrun;
1519 if (isr & EMAC_ISR_BP)
1520 ++st->rx_bad_packet;
1521 if (isr & EMAC_ISR_RP)
1522 ++st->rx_runt_packet;
1523 if (isr & EMAC_ISR_SE)
1524 ++st->rx_short_event;
1525 if (isr & EMAC_ISR_ALE)
1526 ++st->rx_alignment_error;
1527 if (isr & EMAC_ISR_BFCS)
1528 ++st->rx_bad_fcs;
1529 if (isr & EMAC_ISR_PTLE)
1530 ++st->rx_packet_too_long;
1531 if (isr & EMAC_ISR_ORE)
1532 ++st->rx_out_of_range;
1533 if (isr & EMAC_ISR_IRE)
1534 ++st->rx_in_range;
1535 if (isr & EMAC_ISR_SQE)
1536 ++st->tx_sqe;
1537 if (isr & EMAC_ISR_TE)
1538 ++st->tx_errors;
1416 1539
1417 /* Setup link parameters */ 1540 return IRQ_HANDLED;
1418 if (ep) { 1541}
1419 if (ep->autoneg == AUTONEG_ENABLE) {
1420 advertise = ep->advertising;
1421 autoneg = 1;
1422 } else {
1423 autoneg = 0;
1424 forced_speed = ep->speed;
1425 forced_duplex = ep->duplex;
1426 }
1427 }
1428 1542
1429 /* Configure PHY & start aneg */ 1543static struct net_device_stats *emac_stats(struct net_device *ndev)
1430 fep->want_autoneg = autoneg; 1544{
1431 if (autoneg) { 1545 struct ocp_enet_private *dev = ndev->priv;
1432 LINK_DEBUG(("%s: start link aneg, advertise: 0x%x\n", 1546 struct ibm_emac_stats *st = &dev->stats;
1433 fep->ndev->name, advertise)); 1547 struct ibm_emac_error_stats *est = &dev->estats;
1434 fep->phy_mii.def->ops->setup_aneg(&fep->phy_mii, advertise); 1548 struct net_device_stats *nst = &dev->nstats;
1435 } else { 1549
1436 LINK_DEBUG(("%s: start link forced, speed: %d, duplex: %d\n", 1550 DBG2("%d: stats" NL, dev->def->index);
1437 fep->ndev->name, forced_speed, forced_duplex)); 1551
1438 fep->phy_mii.def->ops->setup_forced(&fep->phy_mii, forced_speed, 1552 /* Compute "legacy" statistics */
1439 forced_duplex); 1553 local_irq_disable();
1440 } 1554 nst->rx_packets = (unsigned long)st->rx_packets;
1441 fep->timer_ticks = 0; 1555 nst->rx_bytes = (unsigned long)st->rx_bytes;
1442 mod_timer(&fep->link_timer, jiffies + HZ); 1556 nst->tx_packets = (unsigned long)st->tx_packets;
1557 nst->tx_bytes = (unsigned long)st->tx_bytes;
1558 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1559 est->rx_dropped_error +
1560 est->rx_dropped_resize +
1561 est->rx_dropped_mtu);
1562 nst->tx_dropped = (unsigned long)est->tx_dropped;
1563
1564 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1565 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1566 est->rx_fifo_overrun +
1567 est->rx_overrun);
1568 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1569 est->rx_alignment_error);
1570 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1571 est->rx_bad_fcs);
1572 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1573 est->rx_bd_short_event +
1574 est->rx_bd_packet_too_long +
1575 est->rx_bd_out_of_range +
1576 est->rx_bd_in_range +
1577 est->rx_runt_packet +
1578 est->rx_short_event +
1579 est->rx_packet_too_long +
1580 est->rx_out_of_range +
1581 est->rx_in_range);
1582
1583 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1584 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1585 est->tx_underrun);
1586 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1587 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1588 est->tx_bd_excessive_collisions +
1589 est->tx_bd_late_collision +
1590 est->tx_bd_multple_collisions);
1591 local_irq_enable();
1592 return nst;
1443} 1593}
1444 1594
1445static void emac_link_timer(unsigned long data) 1595static void emac_remove(struct ocp_device *ocpdev)
1446{ 1596{
1447 struct ocp_enet_private *fep = (struct ocp_enet_private *)data; 1597 struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
1448 int link;
1449 1598
1450 if (fep->going_away) 1599 DBG("%d: remove" NL, dev->def->index);
1451 return;
1452 1600
1453 spin_lock_irq(&fep->lock); 1601 ocp_set_drvdata(ocpdev, 0);
1602 unregister_netdev(dev->ndev);
1454 1603
1455 link = fep->phy_mii.def->ops->poll_link(&fep->phy_mii); 1604 tah_fini(dev->tah_dev);
1456 LINK_DEBUG(("%s: poll_link: %d\n", fep->ndev->name, link)); 1605 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1606 zmii_fini(dev->zmii_dev, dev->zmii_input);
1457 1607
1458 if (link == netif_carrier_ok(fep->ndev)) { 1608 emac_dbg_register(dev->def->index, 0);
1459 if (!link && fep->want_autoneg && (++fep->timer_ticks) > 10) 1609
1460 emac_start_link(fep, NULL); 1610 mal_unregister_commac(dev->mal, &dev->commac);
1461 goto out; 1611 iounmap((void *)dev->emacp);
1462 } 1612 kfree(dev->ndev);
1463 printk(KERN_INFO "%s: Link is %s\n", fep->ndev->name,
1464 link ? "Up" : "Down");
1465 if (link) {
1466 netif_carrier_on(fep->ndev);
1467 /* Chip needs a full reset on config change. That sucks, so I
1468 * should ultimately move that to some tasklet to limit
1469 * latency peaks caused by this code
1470 */
1471 emac_reset_configure(fep);
1472 if (fep->opened)
1473 emac_kick(fep);
1474 } else {
1475 fep->timer_ticks = 0;
1476 netif_carrier_off(fep->ndev);
1477 }
1478 out:
1479 mod_timer(&fep->link_timer, jiffies + HZ);
1480 spin_unlock_irq(&fep->lock);
1481} 1613}
1482 1614
1483static void emac_set_multicast_list(struct net_device *dev) 1615static struct mal_commac_ops emac_commac_ops = {
1484{ 1616 .poll_tx = &emac_poll_tx,
1485 struct ocp_enet_private *fep = dev->priv; 1617 .poll_rx = &emac_poll_rx,
1618 .peek_rx = &emac_peek_rx,
1619 .rxde = &emac_rxde,
1620};
1486 1621
1487 spin_lock_irq(&fep->lock); 1622static struct mal_commac_ops emac_commac_sg_ops = {
1488 __emac_set_multicast_list(dev); 1623 .poll_tx = &emac_poll_tx,
1489 spin_unlock_irq(&fep->lock); 1624 .poll_rx = &emac_poll_rx,
1490} 1625 .peek_rx = &emac_peek_rx_sg,
1626 .rxde = &emac_rxde,
1627};
1491 1628
1492static int emac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 1629/* Ethtool support */
1630static int emac_ethtool_get_settings(struct net_device *ndev,
1631 struct ethtool_cmd *cmd)
1493{ 1632{
1494 struct ocp_enet_private *fep = ndev->priv; 1633 struct ocp_enet_private *dev = ndev->priv;
1495 1634
1496 cmd->supported = fep->phy_mii.def->features; 1635 cmd->supported = dev->phy.features;
1497 cmd->port = PORT_MII; 1636 cmd->port = PORT_MII;
1498 cmd->transceiver = XCVR_EXTERNAL; 1637 cmd->phy_address = dev->phy.address;
1499 cmd->phy_address = fep->mii_phy_addr; 1638 cmd->transceiver =
1500 spin_lock_irq(&fep->lock); 1639 dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1501 cmd->autoneg = fep->want_autoneg; 1640
1502 cmd->speed = fep->phy_mii.speed; 1641 local_bh_disable();
1503 cmd->duplex = fep->phy_mii.duplex; 1642 cmd->advertising = dev->phy.advertising;
1504 spin_unlock_irq(&fep->lock); 1643 cmd->autoneg = dev->phy.autoneg;
1644 cmd->speed = dev->phy.speed;
1645 cmd->duplex = dev->phy.duplex;
1646 local_bh_enable();
1647
1505 return 0; 1648 return 0;
1506} 1649}
1507 1650
1508static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) 1651static int emac_ethtool_set_settings(struct net_device *ndev,
1652 struct ethtool_cmd *cmd)
1509{ 1653{
1510 struct ocp_enet_private *fep = ndev->priv; 1654 struct ocp_enet_private *dev = ndev->priv;
1511 unsigned long features = fep->phy_mii.def->features; 1655 u32 f = dev->phy.features;
1512 1656
1513 if (!capable(CAP_NET_ADMIN)) 1657 DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1514 return -EPERM; 1658 cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1515 1659
1660 /* Basic sanity checks */
1661 if (dev->phy.address < 0)
1662 return -EOPNOTSUPP;
1516 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) 1663 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1517 return -EINVAL; 1664 return -EINVAL;
1518 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0) 1665 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1519 return -EINVAL; 1666 return -EINVAL;
1520 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) 1667 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1521 return -EINVAL; 1668 return -EINVAL;
1522 if (cmd->autoneg == AUTONEG_DISABLE) 1669
1670 if (cmd->autoneg == AUTONEG_DISABLE) {
1523 switch (cmd->speed) { 1671 switch (cmd->speed) {
1524 case SPEED_10: 1672 case SPEED_10:
1525 if (cmd->duplex == DUPLEX_HALF && 1673 if (cmd->duplex == DUPLEX_HALF
1526 (features & SUPPORTED_10baseT_Half) == 0) 1674 && !(f & SUPPORTED_10baseT_Half))
1527 return -EINVAL; 1675 return -EINVAL;
1528 if (cmd->duplex == DUPLEX_FULL && 1676 if (cmd->duplex == DUPLEX_FULL
1529 (features & SUPPORTED_10baseT_Full) == 0) 1677 && !(f & SUPPORTED_10baseT_Full))
1530 return -EINVAL; 1678 return -EINVAL;
1531 break; 1679 break;
1532 case SPEED_100: 1680 case SPEED_100:
1533 if (cmd->duplex == DUPLEX_HALF && 1681 if (cmd->duplex == DUPLEX_HALF
1534 (features & SUPPORTED_100baseT_Half) == 0) 1682 && !(f & SUPPORTED_100baseT_Half))
1535 return -EINVAL; 1683 return -EINVAL;
1536 if (cmd->duplex == DUPLEX_FULL && 1684 if (cmd->duplex == DUPLEX_FULL
1537 (features & SUPPORTED_100baseT_Full) == 0) 1685 && !(f & SUPPORTED_100baseT_Full))
1538 return -EINVAL; 1686 return -EINVAL;
1539 break; 1687 break;
1540 case SPEED_1000: 1688 case SPEED_1000:
1541 if (cmd->duplex == DUPLEX_HALF && 1689 if (cmd->duplex == DUPLEX_HALF
1542 (features & SUPPORTED_1000baseT_Half) == 0) 1690 && !(f & SUPPORTED_1000baseT_Half))
1543 return -EINVAL; 1691 return -EINVAL;
1544 if (cmd->duplex == DUPLEX_FULL && 1692 if (cmd->duplex == DUPLEX_FULL
1545 (features & SUPPORTED_1000baseT_Full) == 0) 1693 && !(f & SUPPORTED_1000baseT_Full))
1546 return -EINVAL; 1694 return -EINVAL;
1547 break; 1695 break;
1548 default: 1696 default:
1549 return -EINVAL; 1697 return -EINVAL;
1550 } else if ((features & SUPPORTED_Autoneg) == 0) 1698 }
1551 return -EINVAL; 1699
1552 spin_lock_irq(&fep->lock); 1700 local_bh_disable();
1553 emac_start_link(fep, cmd); 1701 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1554 spin_unlock_irq(&fep->lock); 1702 cmd->duplex);
1703
1704 } else {
1705 if (!(f & SUPPORTED_Autoneg))
1706 return -EINVAL;
1707
1708 local_bh_disable();
1709 dev->phy.def->ops->setup_aneg(&dev->phy,
1710 (cmd->advertising & f) |
1711 (dev->phy.advertising &
1712 (ADVERTISED_Pause |
1713 ADVERTISED_Asym_Pause)));
1714 }
1715 emac_force_link_update(dev);
1716 local_bh_enable();
1717
1555 return 0; 1718 return 0;
1556} 1719}
1557 1720
1558static void 1721static void emac_ethtool_get_ringparam(struct net_device *ndev,
1559emac_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) 1722 struct ethtool_ringparam *rp)
1560{ 1723{
1561 struct ocp_enet_private *fep = ndev->priv; 1724 rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1562 1725 rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1563 strcpy(info->driver, DRV_NAME);
1564 strcpy(info->version, DRV_VERSION);
1565 info->fw_version[0] = '\0';
1566 sprintf(info->bus_info, "IBM EMAC %d", fep->ocpdev->def->index);
1567 info->regdump_len = 0;
1568} 1726}
1569 1727
1570static int emac_nway_reset(struct net_device *ndev) 1728static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1729 struct ethtool_pauseparam *pp)
1571{ 1730{
1572 struct ocp_enet_private *fep = ndev->priv; 1731 struct ocp_enet_private *dev = ndev->priv;
1732
1733 local_bh_disable();
1734 if ((dev->phy.features & SUPPORTED_Autoneg) &&
1735 (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1736 pp->autoneg = 1;
1737
1738 if (dev->phy.duplex == DUPLEX_FULL) {
1739 if (dev->phy.pause)
1740 pp->rx_pause = pp->tx_pause = 1;
1741 else if (dev->phy.asym_pause)
1742 pp->tx_pause = 1;
1743 }
1744 local_bh_enable();
1745}
1573 1746
1574 if (!fep->want_autoneg) 1747static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1575 return -EINVAL; 1748{
1576 spin_lock_irq(&fep->lock); 1749 struct ocp_enet_private *dev = ndev->priv;
1577 emac_start_link(fep, NULL); 1750 return dev->tah_dev != 0;
1578 spin_unlock_irq(&fep->lock);
1579 return 0;
1580} 1751}
1581 1752
1582static u32 emac_get_link(struct net_device *ndev) 1753static int emac_get_regs_len(struct ocp_enet_private *dev)
1583{ 1754{
1584 return netif_carrier_ok(ndev); 1755 return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1585} 1756}
1586 1757
1587static struct ethtool_ops emac_ethtool_ops = { 1758static int emac_ethtool_get_regs_len(struct net_device *ndev)
1588 .get_settings = emac_get_settings, 1759{
1589 .set_settings = emac_set_settings, 1760 struct ocp_enet_private *dev = ndev->priv;
1590 .get_drvinfo = emac_get_drvinfo, 1761 return sizeof(struct emac_ethtool_regs_hdr) +
1591 .nway_reset = emac_nway_reset, 1762 emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1592 .get_link = emac_get_link 1763 zmii_get_regs_len(dev->zmii_dev) +
1593}; 1764 rgmii_get_regs_len(dev->rgmii_dev) +
1765 tah_get_regs_len(dev->tah_dev);
1766}
1594 1767
1595static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1768static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1596{ 1769{
1597 struct ocp_enet_private *fep = dev->priv; 1770 struct emac_ethtool_regs_subhdr *hdr = buf;
1598 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1599 1771
1600 switch (cmd) { 1772 hdr->version = EMAC_ETHTOOL_REGS_VER;
1601 case SIOCGMIIPHY: 1773 hdr->index = dev->def->index;
1602 data[0] = fep->mii_phy_addr; 1774 memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1603 /* Fall through */ 1775 return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1604 case SIOCGMIIREG: 1776}
1605 data[3] = emac_phy_read(dev, fep->mii_phy_addr, data[1]);
1606 return 0;
1607 case SIOCSMIIREG:
1608 if (!capable(CAP_NET_ADMIN))
1609 return -EPERM;
1610 1777
1611 emac_phy_write(dev, fep->mii_phy_addr, data[1], data[2]); 1778static void emac_ethtool_get_regs(struct net_device *ndev,
1612 return 0; 1779 struct ethtool_regs *regs, void *buf)
1613 default: 1780{
1614 return -EOPNOTSUPP; 1781 struct ocp_enet_private *dev = ndev->priv;
1782 struct emac_ethtool_regs_hdr *hdr = buf;
1783
1784 hdr->components = 0;
1785 buf = hdr + 1;
1786
1787 local_irq_disable();
1788 buf = mal_dump_regs(dev->mal, buf);
1789 buf = emac_dump_regs(dev, buf);
1790 if (dev->zmii_dev) {
1791 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
1792 buf = zmii_dump_regs(dev->zmii_dev, buf);
1615 } 1793 }
1794 if (dev->rgmii_dev) {
1795 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
1796 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
1797 }
1798 if (dev->tah_dev) {
1799 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
1800 buf = tah_dump_regs(dev->tah_dev, buf);
1801 }
1802 local_irq_enable();
1616} 1803}
1617 1804
1618static int emac_open(struct net_device *dev) 1805static int emac_ethtool_nway_reset(struct net_device *ndev)
1619{ 1806{
1620 struct ocp_enet_private *fep = dev->priv; 1807 struct ocp_enet_private *dev = ndev->priv;
1621 int rc; 1808 int res = 0;
1622 1809
1623 spin_lock_irq(&fep->lock); 1810 DBG("%d: nway_reset" NL, dev->def->index);
1624 1811
1625 fep->opened = 1; 1812 if (dev->phy.address < 0)
1626 netif_carrier_off(dev); 1813 return -EOPNOTSUPP;
1627 1814
1628 /* Reset & configure the chip */ 1815 local_bh_disable();
1629 emac_reset_configure(fep); 1816 if (!dev->phy.autoneg) {
1817 res = -EINVAL;
1818 goto out;
1819 }
1630 1820
1631 spin_unlock_irq(&fep->lock); 1821 dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1822 emac_force_link_update(dev);
1632 1823
1633 /* Request our interrupt lines */ 1824 out:
1634 rc = request_irq(dev->irq, emac_mac_irq, 0, "IBM EMAC MAC", dev); 1825 local_bh_enable();
1635 if (rc != 0) { 1826 return res;
1636 printk("dev->irq %d failed\n", dev->irq); 1827}
1637 goto bail;
1638 }
1639 /* Kick the chip rx & tx channels into life */
1640 spin_lock_irq(&fep->lock);
1641 emac_kick(fep);
1642 spin_unlock_irq(&fep->lock);
1643 1828
1644 netif_start_queue(dev); 1829static int emac_ethtool_get_stats_count(struct net_device *ndev)
1645 bail: 1830{
1646 return rc; 1831 return EMAC_ETHTOOL_STATS_COUNT;
1647} 1832}
1648 1833
1649static int emac_close(struct net_device *dev) 1834static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1835 u8 * buf)
1650{ 1836{
1651 struct ocp_enet_private *fep = dev->priv; 1837 if (stringset == ETH_SS_STATS)
1652 emac_t *emacp = fep->emacp; 1838 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1839}
1653 1840
1654 /* XXX Stop IRQ emitting here */ 1841static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
1655 spin_lock_irq(&fep->lock); 1842 struct ethtool_stats *estats,
1656 fep->opened = 0; 1843 u64 * tmp_stats)
1657 mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask); 1844{
1658 mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask); 1845 struct ocp_enet_private *dev = ndev->priv;
1659 netif_carrier_off(dev); 1846 local_irq_disable();
1660 netif_stop_queue(dev); 1847 memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
1848 tmp_stats += sizeof(dev->stats) / sizeof(u64);
1849 memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
1850 local_irq_enable();
1851}
1661 1852
1662 /* 1853static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1663 * Check for a link, some PHYs don't provide a clock if 1854 struct ethtool_drvinfo *info)
1664 * no link is present. Some EMACs will not come out of 1855{
1665 * soft reset without a PHY clock present. 1856 struct ocp_enet_private *dev = ndev->priv;
1666 */
1667 if (fep->phy_mii.def->ops->poll_link(&fep->phy_mii)) {
1668 out_be32(&emacp->em0mr0, EMAC_M0_SRST);
1669 udelay(10);
1670 1857
1671 if (emacp->em0mr0 & EMAC_M0_SRST) { 1858 strcpy(info->driver, "ibm_emac");
1672 /*not sure what to do here hopefully it clears before another open */ 1859 strcpy(info->version, DRV_VERSION);
1673 printk(KERN_ERR 1860 info->fw_version[0] = '\0';
1674 "%s: Phy SoftReset didn't clear, no link?\n", 1861 sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1675 dev->name); 1862 info->n_stats = emac_ethtool_get_stats_count(ndev);
1676 } 1863 info->regdump_len = emac_ethtool_get_regs_len(ndev);
1677 } 1864}
1678 1865
1679 /* Free the irq's */ 1866static struct ethtool_ops emac_ethtool_ops = {
1680 free_irq(dev->irq, dev); 1867 .get_settings = emac_ethtool_get_settings,
1868 .set_settings = emac_ethtool_set_settings,
1869 .get_drvinfo = emac_ethtool_get_drvinfo,
1681 1870
1682 spin_unlock_irq(&fep->lock); 1871 .get_regs_len = emac_ethtool_get_regs_len,
1872 .get_regs = emac_ethtool_get_regs,
1683 1873
1684 return 0; 1874 .nway_reset = emac_ethtool_nway_reset,
1685}
1686 1875
1687static void emac_remove(struct ocp_device *ocpdev) 1876 .get_ringparam = emac_ethtool_get_ringparam,
1688{ 1877 .get_pauseparam = emac_ethtool_get_pauseparam,
1689 struct net_device *dev = ocp_get_drvdata(ocpdev); 1878
1690 struct ocp_enet_private *ep = dev->priv; 1879 .get_rx_csum = emac_ethtool_get_rx_csum,
1691 1880
1692 /* FIXME: locking, races, ... */ 1881 .get_strings = emac_ethtool_get_strings,
1693 ep->going_away = 1; 1882 .get_stats_count = emac_ethtool_get_stats_count,
1694 ocp_set_drvdata(ocpdev, NULL); 1883 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
1695 if (ep->rgmii_dev) 1884
1696 emac_close_rgmii(ep->rgmii_dev); 1885 .get_link = ethtool_op_get_link,
1697 if (ep->zmii_dev) 1886 .get_tx_csum = ethtool_op_get_tx_csum,
1698 emac_close_zmii(ep->zmii_dev); 1887 .get_sg = ethtool_op_get_sg,
1699
1700 unregister_netdev(dev);
1701 del_timer_sync(&ep->link_timer);
1702 mal_unregister_commac(ep->mal, &ep->commac);
1703 iounmap((void *)ep->emacp);
1704 kfree(dev);
1705}
1706
1707struct mal_commac_ops emac_commac_ops = {
1708 .txeob = &emac_txeob_dev,
1709 .txde = &emac_txde_dev,
1710 .rxeob = &emac_rxeob_dev,
1711 .rxde = &emac_rxde_dev,
1712}; 1888};
1713 1889
1714#ifdef CONFIG_NET_POLL_CONTROLLER 1890static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1715static void emac_netpoll(struct net_device *ndev)
1716{ 1891{
1717 emac_rxeob_dev((void *)ndev, 0); 1892 struct ocp_enet_private *dev = ndev->priv;
1718 emac_txeob_dev((void *)ndev, 0); 1893 uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1894
1895 DBG("%d: ioctl %08x" NL, dev->def->index, cmd);
1896
1897 if (dev->phy.address < 0)
1898 return -EOPNOTSUPP;
1899
1900 switch (cmd) {
1901 case SIOCGMIIPHY:
1902 case SIOCDEVPRIVATE:
1903 data[0] = dev->phy.address;
1904 /* Fall through */
1905 case SIOCGMIIREG:
1906 case SIOCDEVPRIVATE + 1:
1907 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
1908 return 0;
1909
1910 case SIOCSMIIREG:
1911 case SIOCDEVPRIVATE + 2:
1912 if (!capable(CAP_NET_ADMIN))
1913 return -EPERM;
1914 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
1915 return 0;
1916 default:
1917 return -EOPNOTSUPP;
1918 }
1719} 1919}
1720#endif
1721 1920
1722static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal) 1921static int __init emac_probe(struct ocp_device *ocpdev)
1723{ 1922{
1724 int deferred_init = 0; 1923 struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1725 int rc = 0, i;
1726 struct net_device *ndev; 1924 struct net_device *ndev;
1727 struct ocp_enet_private *ep; 1925 struct ocp_device *maldev;
1728 struct ocp_func_emac_data *emacdata; 1926 struct ocp_enet_private *dev;
1729 int commac_reg = 0; 1927 int err, i;
1730 u32 phy_map; 1928
1929 DBG("%d: probe" NL, ocpdev->def->index);
1731 1930
1732 emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
1733 if (!emacdata) { 1931 if (!emacdata) {
1734 printk(KERN_ERR "emac%d: Missing additional data!\n", 1932 printk(KERN_ERR "emac%d: Missing additional data!\n",
1735 ocpdev->def->index); 1933 ocpdev->def->index);
@@ -1738,304 +1936,312 @@ static int emac_init_device(struct ocp_device *ocpdev, struct ibm_ocp_mal *mal)
1738 1936
1739 /* Allocate our net_device structure */ 1937 /* Allocate our net_device structure */
1740 ndev = alloc_etherdev(sizeof(struct ocp_enet_private)); 1938 ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1741 if (ndev == NULL) { 1939 if (!ndev) {
1742 printk(KERN_ERR 1940 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1743 "emac%d: Could not allocate ethernet device.\n",
1744 ocpdev->def->index); 1941 ocpdev->def->index);
1745 return -ENOMEM; 1942 return -ENOMEM;
1746 } 1943 }
1747 ep = ndev->priv; 1944 dev = ndev->priv;
1748 ep->ndev = ndev; 1945 dev->ndev = ndev;
1749 ep->ocpdev = ocpdev; 1946 dev->ldev = &ocpdev->dev;
1750 ndev->irq = ocpdev->def->irq; 1947 dev->def = ocpdev->def;
1751 ep->wol_irq = emacdata->wol_irq; 1948 SET_MODULE_OWNER(ndev);
1752 if (emacdata->mdio_idx >= 0) {
1753 if (emacdata->mdio_idx == ocpdev->def->index) {
1754 /* Set the common MDIO net_device */
1755 mdio_ndev = ndev;
1756 deferred_init = 1;
1757 }
1758 ep->mdio_dev = mdio_ndev;
1759 } else {
1760 ep->mdio_dev = ndev;
1761 }
1762 1949
1763 ocp_set_drvdata(ocpdev, ndev); 1950 /* Find MAL device we are connected to */
1764 1951 maldev =
1765 spin_lock_init(&ep->lock); 1952 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1766 1953 if (!maldev) {
1767 /* Fill out MAL informations and register commac */ 1954 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1768 ep->mal = mal; 1955 dev->def->index, emacdata->mal_idx);
1769 ep->mal_tx_chan = emacdata->mal_tx_chan; 1956 err = -ENODEV;
1770 ep->mal_rx_chan = emacdata->mal_rx_chan; 1957 goto out;
1771 ep->commac.ops = &emac_commac_ops; 1958 }
1772 ep->commac.dev = ndev; 1959 dev->mal = ocp_get_drvdata(maldev);
1773 ep->commac.tx_chan_mask = MAL_CHAN_MASK(ep->mal_tx_chan); 1960 if (!dev->mal) {
1774 ep->commac.rx_chan_mask = MAL_CHAN_MASK(ep->mal_rx_chan); 1961 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1775 rc = mal_register_commac(ep->mal, &ep->commac); 1962 dev->def->index, emacdata->mal_idx);
1776 if (rc != 0) 1963 err = -ENODEV;
1777 goto bail; 1964 goto out;
1778 commac_reg = 1;
1779
1780 /* Map our MMIOs */
1781 ep->emacp = (emac_t *) ioremap(ocpdev->def->paddr, sizeof(emac_t));
1782
1783 /* Check if we need to attach to a ZMII */
1784 if (emacdata->zmii_idx >= 0) {
1785 ep->zmii_input = emacdata->zmii_mux;
1786 ep->zmii_dev =
1787 ocp_find_device(OCP_ANY_ID, OCP_FUNC_ZMII,
1788 emacdata->zmii_idx);
1789 if (ep->zmii_dev == NULL)
1790 printk(KERN_WARNING
1791 "emac%d: ZMII %d requested but not found !\n",
1792 ocpdev->def->index, emacdata->zmii_idx);
1793 else if ((rc =
1794 emac_init_zmii(ep->zmii_dev, ep->zmii_input,
1795 emacdata->phy_mode)) != 0)
1796 goto bail;
1797 } 1965 }
1798 1966
1799 /* Check if we need to attach to a RGMII */ 1967 /* Register with MAL */
1800 if (emacdata->rgmii_idx >= 0) { 1968 dev->commac.ops = &emac_commac_ops;
1801 ep->rgmii_input = emacdata->rgmii_mux; 1969 dev->commac.dev = dev;
1802 ep->rgmii_dev = 1970 dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1803 ocp_find_device(OCP_ANY_ID, OCP_FUNC_RGMII, 1971 dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1804 emacdata->rgmii_idx); 1972 err = mal_register_commac(dev->mal, &dev->commac);
1805 if (ep->rgmii_dev == NULL) 1973 if (err) {
1806 printk(KERN_WARNING 1974 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1807 "emac%d: RGMII %d requested but not found !\n", 1975 dev->def->index, emacdata->mal_idx);
1808 ocpdev->def->index, emacdata->rgmii_idx); 1976 goto out;
1809 else if ((rc = 1977 }
1810 emac_init_rgmii(ep->rgmii_dev, ep->rgmii_input, 1978 dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1811 emacdata->phy_mode)) != 0) 1979 dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
1812 goto bail; 1980
1981 /* Get pointers to BD rings */
1982 dev->tx_desc =
1983 dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
1984 emacdata->mal_tx_chan);
1985 dev->rx_desc =
1986 dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
1987 emacdata->mal_rx_chan);
1988
1989 DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
1990 DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
1991
1992 /* Clean rings */
1993 memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
1994 memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
1995
1996 /* If we depend on another EMAC for MDIO, check whether it was probed already */
1997 if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
1998 struct ocp_device *mdiodev =
1999 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2000 emacdata->mdio_idx);
2001 if (!mdiodev) {
2002 printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2003 dev->def->index, emacdata->mdio_idx);
2004 err = -ENODEV;
2005 goto out2;
2006 }
2007 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2008 if (!dev->mdio_dev) {
2009 printk(KERN_ERR
2010 "emac%d: emac%d hasn't been initialized yet!\n",
2011 dev->def->index, emacdata->mdio_idx);
2012 err = -ENODEV;
2013 goto out2;
2014 }
1813 } 2015 }
1814 2016
1815 /* Check if we need to attach to a TAH */ 2017 /* Attach to ZMII, if needed */
1816 if (emacdata->tah_idx >= 0) { 2018 if ((err = zmii_attach(dev)) != 0)
1817 ep->tah_dev = 2019 goto out2;
1818 ocp_find_device(OCP_ANY_ID, OCP_FUNC_TAH, 2020
1819 emacdata->tah_idx); 2021 /* Attach to RGMII, if needed */
1820 if (ep->tah_dev == NULL) 2022 if ((err = rgmii_attach(dev)) != 0)
1821 printk(KERN_WARNING 2023 goto out3;
1822 "emac%d: TAH %d requested but not found !\n", 2024
1823 ocpdev->def->index, emacdata->tah_idx); 2025 /* Attach to TAH, if needed */
1824 else if ((rc = emac_init_tah(ep)) != 0) 2026 if ((err = tah_attach(dev)) != 0)
1825 goto bail; 2027 goto out4;
2028
2029 /* Map EMAC regs */
2030 dev->emacp =
2031 (struct emac_regs *)ioremap(dev->def->paddr,
2032 sizeof(struct emac_regs));
2033 if (!dev->emacp) {
2034 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2035 dev->def->index);
2036 err = -ENOMEM;
2037 goto out5;
1826 } 2038 }
1827 2039
1828 if (deferred_init) { 2040 /* Fill in MAC address */
1829 if (!list_empty(&emac_init_list)) { 2041 for (i = 0; i < 6; ++i)
1830 struct list_head *entry; 2042 ndev->dev_addr[i] = emacdata->mac_addr[i];
1831 struct emac_def_dev *ddev;
1832 2043
1833 list_for_each(entry, &emac_init_list) { 2044 /* Set some link defaults before we can find out real parameters */
1834 ddev = 2045 dev->phy.speed = SPEED_100;
1835 list_entry(entry, struct emac_def_dev, 2046 dev->phy.duplex = DUPLEX_FULL;
1836 link); 2047 dev->phy.autoneg = AUTONEG_DISABLE;
1837 emac_init_device(ddev->ocpdev, ddev->mal); 2048 dev->phy.pause = dev->phy.asym_pause = 0;
1838 } 2049 init_timer(&dev->link_timer);
2050 dev->link_timer.function = emac_link_timer;
2051 dev->link_timer.data = (unsigned long)dev;
2052
2053 /* Find PHY if any */
2054 dev->phy.dev = ndev;
2055 dev->phy.mode = emacdata->phy_mode;
2056 if (emacdata->phy_map != 0xffffffff) {
2057 u32 phy_map = emacdata->phy_map | busy_phy_map;
2058 u32 adv;
2059
2060 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2061 emacdata->phy_map, busy_phy_map);
2062
2063 EMAC_RX_CLK_TX(dev->def->index);
2064
2065 dev->phy.mdio_read = emac_mdio_read;
2066 dev->phy.mdio_write = emac_mdio_write;
2067
2068 /* Configure EMAC with defaults so we can at least use MDIO
2069 * This is needed mostly for 440GX
2070 */
2071 if (emac_phy_gpcs(dev->phy.mode)) {
2072 /* XXX
2073 * Make GPCS PHY address equal to EMAC index.
2074 * We probably should take into account busy_phy_map
2075 * and/or phy_map here.
2076 */
2077 dev->phy.address = dev->def->index;
1839 } 2078 }
1840 } 2079
2080 emac_configure(dev);
1841 2081
1842 /* Init link monitoring timer */ 2082 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
1843 init_timer(&ep->link_timer); 2083 if (!(phy_map & 1)) {
1844 ep->link_timer.function = emac_link_timer; 2084 int r;
1845 ep->link_timer.data = (unsigned long)ep; 2085 busy_phy_map |= 1 << i;
1846 ep->timer_ticks = 0; 2086
1847 2087 /* Quick check if there is a PHY at the address */
1848 /* Fill up the mii_phy structure */ 2088 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
1849 ep->phy_mii.dev = ndev; 2089 if (r == 0xffff || r < 0)
1850 ep->phy_mii.mdio_read = emac_phy_read; 2090 continue;
1851 ep->phy_mii.mdio_write = emac_phy_write; 2091 if (!mii_phy_probe(&dev->phy, i))
1852 ep->phy_mii.mode = emacdata->phy_mode; 2092 break;
1853 2093 }
1854 /* Find PHY */ 2094 if (i == 0x20) {
1855 phy_map = emacdata->phy_map | busy_phy_map; 2095 printk(KERN_WARNING "emac%d: can't find PHY!\n",
1856 for (i = 0; i <= 0x1f; i++, phy_map >>= 1) { 2096 dev->def->index);
1857 if ((phy_map & 0x1) == 0) { 2097 goto out6;
1858 int val = emac_phy_read(ndev, i, MII_BMCR);
1859 if (val != 0xffff && val != -1)
1860 break;
1861 } 2098 }
1862 }
1863 if (i == 0x20) {
1864 printk(KERN_WARNING "emac%d: Can't find PHY.\n",
1865 ocpdev->def->index);
1866 rc = -ENODEV;
1867 goto bail;
1868 }
1869 busy_phy_map |= 1 << i;
1870 ep->mii_phy_addr = i;
1871 rc = mii_phy_probe(&ep->phy_mii, i);
1872 if (rc) {
1873 printk(KERN_WARNING "emac%d: Failed to probe PHY type.\n",
1874 ocpdev->def->index);
1875 rc = -ENODEV;
1876 goto bail;
1877 }
1878
1879 /* Disable any PHY features not supported by the platform */
1880 ep->phy_mii.def->features &= ~emacdata->phy_feat_exc;
1881 2099
1882 /* Setup initial PHY config & startup aneg */ 2100 /* Init PHY */
1883 if (ep->phy_mii.def->ops->init) 2101 if (dev->phy.def->ops->init)
1884 ep->phy_mii.def->ops->init(&ep->phy_mii); 2102 dev->phy.def->ops->init(&dev->phy);
1885 netif_carrier_off(ndev);
1886 if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
1887 ep->want_autoneg = 1;
1888 else {
1889 ep->want_autoneg = 0;
1890 2103
1891 /* Select highest supported speed/duplex */ 2104 /* Disable any PHY features not supported by the platform */
1892 if (ep->phy_mii.def->features & SUPPORTED_1000baseT_Full) { 2105 dev->phy.def->features &= ~emacdata->phy_feat_exc;
1893 ep->phy_mii.speed = SPEED_1000; 2106
1894 ep->phy_mii.duplex = DUPLEX_FULL; 2107 /* Setup initial link parameters */
1895 } else if (ep->phy_mii.def->features & 2108 if (dev->phy.features & SUPPORTED_Autoneg) {
1896 SUPPORTED_1000baseT_Half) { 2109 adv = dev->phy.features;
1897 ep->phy_mii.speed = SPEED_1000; 2110#if !defined(CONFIG_40x)
1898 ep->phy_mii.duplex = DUPLEX_HALF; 2111 adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1899 } else if (ep->phy_mii.def->features & 2112#endif
1900 SUPPORTED_100baseT_Full) { 2113 /* Restart autonegotiation */
1901 ep->phy_mii.speed = SPEED_100; 2114 dev->phy.def->ops->setup_aneg(&dev->phy, adv);
1902 ep->phy_mii.duplex = DUPLEX_FULL;
1903 } else if (ep->phy_mii.def->features &
1904 SUPPORTED_100baseT_Half) {
1905 ep->phy_mii.speed = SPEED_100;
1906 ep->phy_mii.duplex = DUPLEX_HALF;
1907 } else if (ep->phy_mii.def->features &
1908 SUPPORTED_10baseT_Full) {
1909 ep->phy_mii.speed = SPEED_10;
1910 ep->phy_mii.duplex = DUPLEX_FULL;
1911 } else { 2115 } else {
1912 ep->phy_mii.speed = SPEED_10; 2116 u32 f = dev->phy.def->features;
1913 ep->phy_mii.duplex = DUPLEX_HALF; 2117 int speed = SPEED_10, fd = DUPLEX_HALF;
2118
2119 /* Select highest supported speed/duplex */
2120 if (f & SUPPORTED_1000baseT_Full) {
2121 speed = SPEED_1000;
2122 fd = DUPLEX_FULL;
2123 } else if (f & SUPPORTED_1000baseT_Half)
2124 speed = SPEED_1000;
2125 else if (f & SUPPORTED_100baseT_Full) {
2126 speed = SPEED_100;
2127 fd = DUPLEX_FULL;
2128 } else if (f & SUPPORTED_100baseT_Half)
2129 speed = SPEED_100;
2130 else if (f & SUPPORTED_10baseT_Full)
2131 fd = DUPLEX_FULL;
2132
2133 /* Force link parameters */
2134 dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
1914 } 2135 }
1915 } 2136 } else {
1916 emac_start_link(ep, NULL); 2137 emac_reset(dev);
1917 2138
1918 /* read the MAC Address */ 2139 /* PHY-less configuration.
1919 for (i = 0; i < 6; i++) 2140 * XXX I probably should move these settings to emacdata
1920 ndev->dev_addr[i] = emacdata->mac_addr[i]; 2141 */
2142 dev->phy.address = -1;
2143 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2144 dev->phy.pause = 1;
2145 }
1921 2146
1922 /* Fill in the driver function table */ 2147 /* Fill in the driver function table */
1923 ndev->open = &emac_open; 2148 ndev->open = &emac_open;
1924 ndev->hard_start_xmit = &emac_start_xmit; 2149 if (dev->tah_dev) {
2150 ndev->hard_start_xmit = &emac_start_xmit_sg;
2151 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2152 } else
2153 ndev->hard_start_xmit = &emac_start_xmit;
2154 ndev->tx_timeout = &emac_full_tx_reset;
2155 ndev->watchdog_timeo = 5 * HZ;
1925 ndev->stop = &emac_close; 2156 ndev->stop = &emac_close;
1926 ndev->get_stats = &emac_stats; 2157 ndev->get_stats = &emac_stats;
1927 if (emacdata->jumbo)
1928 ndev->change_mtu = &emac_change_mtu;
1929 ndev->set_mac_address = &emac_set_mac_address;
1930 ndev->set_multicast_list = &emac_set_multicast_list; 2158 ndev->set_multicast_list = &emac_set_multicast_list;
1931 ndev->do_ioctl = &emac_ioctl; 2159 ndev->do_ioctl = &emac_ioctl;
2160 if (emac_phy_supports_gige(emacdata->phy_mode)) {
2161 ndev->change_mtu = &emac_change_mtu;
2162 dev->commac.ops = &emac_commac_sg_ops;
2163 }
1932 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); 2164 SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
1933 if (emacdata->tah_idx >= 0)
1934 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG;
1935#ifdef CONFIG_NET_POLL_CONTROLLER
1936 ndev->poll_controller = emac_netpoll;
1937#endif
1938 2165
1939 SET_MODULE_OWNER(ndev); 2166 netif_carrier_off(ndev);
2167 netif_stop_queue(ndev);
2168
2169 err = register_netdev(ndev);
2170 if (err) {
2171 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2172 dev->def->index, err);
2173 goto out6;
2174 }
1940 2175
1941 rc = register_netdev(ndev); 2176 ocp_set_drvdata(ocpdev, dev);
1942 if (rc != 0)
1943 goto bail;
1944 2177
1945 printk("%s: IBM emac, MAC %02x:%02x:%02x:%02x:%02x:%02x\n", 2178 printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
1946 ndev->name, 2179 ndev->name, dev->def->index,
1947 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2], 2180 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
1948 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]); 2181 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
1949 printk(KERN_INFO "%s: Found %s PHY (0x%02x)\n",
1950 ndev->name, ep->phy_mii.def->name, ep->mii_phy_addr);
1951 2182
1952 bail: 2183 if (dev->phy.address >= 0)
1953 if (rc && commac_reg) 2184 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
1954 mal_unregister_commac(ep->mal, &ep->commac); 2185 dev->phy.def->name, dev->phy.address);
1955 if (rc && ndev)
1956 kfree(ndev);
1957 2186
1958 return rc; 2187 emac_dbg_register(dev->def->index, dev);
1959}
1960
1961static int emac_probe(struct ocp_device *ocpdev)
1962{
1963 struct ocp_device *maldev;
1964 struct ibm_ocp_mal *mal;
1965 struct ocp_func_emac_data *emacdata;
1966
1967 emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
1968 if (emacdata == NULL) {
1969 printk(KERN_ERR "emac%d: Missing additional datas !\n",
1970 ocpdev->def->index);
1971 return -ENODEV;
1972 }
1973
1974 /* Get the MAL device */
1975 maldev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_MAL, emacdata->mal_idx);
1976 if (maldev == NULL) {
1977 printk("No maldev\n");
1978 return -ENODEV;
1979 }
1980 /*
1981 * Get MAL driver data, it must be here due to link order.
1982 * When the driver is modularized, symbol dependencies will
1983 * ensure the MAL driver is already present if built as a
1984 * module.
1985 */
1986 mal = (struct ibm_ocp_mal *)ocp_get_drvdata(maldev);
1987 if (mal == NULL) {
1988 printk("No maldrv\n");
1989 return -ENODEV;
1990 }
1991
1992 /* If we depend on another EMAC for MDIO, wait for it to show up */
1993 if (emacdata->mdio_idx >= 0 &&
1994 (emacdata->mdio_idx != ocpdev->def->index) && !mdio_ndev) {
1995 struct emac_def_dev *ddev;
1996 /* Add this index to the deferred init table */
1997 ddev = kmalloc(sizeof(struct emac_def_dev), GFP_KERNEL);
1998 ddev->ocpdev = ocpdev;
1999 ddev->mal = mal;
2000 list_add_tail(&ddev->link, &emac_init_list);
2001 } else {
2002 emac_init_device(ocpdev, mal);
2003 }
2004 2188
2005 return 0; 2189 return 0;
2190 out6:
2191 iounmap((void *)dev->emacp);
2192 out5:
2193 tah_fini(dev->tah_dev);
2194 out4:
2195 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2196 out3:
2197 zmii_fini(dev->zmii_dev, dev->zmii_input);
2198 out2:
2199 mal_unregister_commac(dev->mal, &dev->commac);
2200 out:
2201 kfree(ndev);
2202 return err;
2006} 2203}
2007 2204
2008/* Structure for a device driver */
2009static struct ocp_device_id emac_ids[] = { 2205static struct ocp_device_id emac_ids[] = {
2010 {.vendor = OCP_ANY_ID,.function = OCP_FUNC_EMAC}, 2206 { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
2011 {.vendor = OCP_VENDOR_INVALID} 2207 { .vendor = OCP_VENDOR_INVALID}
2012}; 2208};
2013 2209
2014static struct ocp_driver emac_driver = { 2210static struct ocp_driver emac_driver = {
2015 .name = "emac", 2211 .name = "emac",
2016 .id_table = emac_ids, 2212 .id_table = emac_ids,
2017
2018 .probe = emac_probe, 2213 .probe = emac_probe,
2019 .remove = emac_remove, 2214 .remove = emac_remove,
2020}; 2215};
2021 2216
2022static int __init emac_init(void) 2217static int __init emac_init(void)
2023{ 2218{
2024 printk(KERN_INFO DRV_NAME ": " DRV_DESC ", version " DRV_VERSION "\n"); 2219 printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");
2025 printk(KERN_INFO "Maintained by " DRV_AUTHOR "\n"); 2220
2221 DBG(": init" NL);
2026 2222
2027 if (skb_res > 2) { 2223 if (mal_init())
2028 printk(KERN_WARNING "Invalid skb_res: %d, cropping to 2\n", 2224 return -ENODEV;
2029 skb_res); 2225
2030 skb_res = 2; 2226 EMAC_CLK_INTERNAL;
2227 if (ocp_register_driver(&emac_driver)) {
2228 EMAC_CLK_EXTERNAL;
2229 ocp_unregister_driver(&emac_driver);
2230 mal_exit();
2231 return -ENODEV;
2031 } 2232 }
2233 EMAC_CLK_EXTERNAL;
2032 2234
2033 return ocp_register_driver(&emac_driver); 2235 emac_init_debug();
2236 return 0;
2034} 2237}
2035 2238
2036static void __exit emac_exit(void) 2239static void __exit emac_exit(void)
2037{ 2240{
2241 DBG(": exit" NL);
2038 ocp_unregister_driver(&emac_driver); 2242 ocp_unregister_driver(&emac_driver);
2243 mal_exit();
2244 emac_fini_debug();
2039} 2245}
2040 2246
2041module_init(emac_init); 2247module_init(emac_init);
diff --git a/drivers/net/ibm_emac/ibm_emac_core.h b/drivers/net/ibm_emac/ibm_emac_core.h
index 97e6e1ea8c89..e9b44d030ac3 100644
--- a/drivers/net/ibm_emac/ibm_emac_core.h
+++ b/drivers/net/ibm_emac/ibm_emac_core.h
@@ -1,146 +1,221 @@
1/* 1/*
2 * ibm_emac_core.h 2 * drivers/net/ibm_emac/ibm_emac_core.h
3 * 3 *
4 * Ethernet driver for the built in ethernet on the IBM 405 PowerPC 4 * Driver for PowerPC 4xx on-chip ethernet controller.
5 * processor.
6 * 5 *
7 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies.
8 * Sept, 2001 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
9 * 8 *
10 * Orignial driver 9 * Based on original work by
11 * Johnnie Peters 10 * Armin Kuster <akuster@mvista.com>
12 * jpeters@mvista.com 11 * Johnnie Peters <jpeters@mvista.com>
13 * 12 * Copyright 2000, 2001 MontaVista Softare Inc.
14 * Copyright 2000 MontaVista Softare Inc.
15 * 13 *
16 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
18 * Free Software Foundation; either version 2 of the License, or (at your 16 * Free Software Foundation; either version 2 of the License, or (at your
19 * option) any later version. 17 * option) any later version.
18 *
20 */ 19 */
20#ifndef __IBM_EMAC_CORE_H_
21#define __IBM_EMAC_CORE_H_
21 22
22#ifndef _IBM_EMAC_CORE_H_ 23#include <linux/config.h>
23#define _IBM_EMAC_CORE_H_
24
25#include <linux/netdevice.h> 24#include <linux/netdevice.h>
25#include <linux/dma-mapping.h>
26#include <asm/ocp.h> 26#include <asm/ocp.h>
27#include <asm/mmu.h> /* For phys_addr_t */
28 27
29#include "ibm_emac.h" 28#include "ibm_emac.h"
30#include "ibm_emac_phy.h" 29#include "ibm_emac_phy.h"
31#include "ibm_emac_rgmii.h"
32#include "ibm_emac_zmii.h" 30#include "ibm_emac_zmii.h"
31#include "ibm_emac_rgmii.h"
33#include "ibm_emac_mal.h" 32#include "ibm_emac_mal.h"
34#include "ibm_emac_tah.h" 33#include "ibm_emac_tah.h"
35 34
36#ifndef CONFIG_IBM_EMAC_TXB 35#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
37#define NUM_TX_BUFF 64 36#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
38#define NUM_RX_BUFF 64
39#else
40#define NUM_TX_BUFF CONFIG_IBM_EMAC_TXB
41#define NUM_RX_BUFF CONFIG_IBM_EMAC_RXB
42#endif
43 37
44/* This does 16 byte alignment, exactly what we need. 38/* Simple sanity check */
45 * The packet length includes FCS, but we don't want to 39#if NUM_TX_BUFF > 256 || NUM_RX_BUFF > 256
46 * include that when passing upstream as it messes up 40#error Invalid number of buffer descriptors (greater than 256)
47 * bridging applications.
48 */
49#ifndef CONFIG_IBM_EMAC_SKBRES
50#define SKB_RES 2
51#else
52#define SKB_RES CONFIG_IBM_EMAC_SKBRES
53#endif 41#endif
54 42
55/* Note about alignement. alloc_skb() returns a cache line 43// XXX
56 * aligned buffer. However, dev_alloc_skb() will add 16 more 44#define EMAC_MIN_MTU 46
57 * bytes and "reserve" them, so our buffer will actually end 45#define EMAC_MAX_MTU 9000
58 * on a half cache line. What we do is to use directly 46
59 * alloc_skb, allocate 16 more bytes to match the total amount 47/* Maximum L2 header length (VLAN tagged, no FCS) */
60 * allocated by dev_alloc_skb(), but we don't reserve. 48#define EMAC_MTU_OVERHEAD (6 * 2 + 2 + 4)
49
50/* RX BD size for the given MTU */
51static inline int emac_rx_size(int mtu)
52{
53 if (mtu > ETH_DATA_LEN)
54 return MAL_MAX_RX_SIZE;
55 else
56 return mal_rx_size(ETH_DATA_LEN + EMAC_MTU_OVERHEAD);
57}
58
59#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
60
61#define EMAC_RX_SKB_HEADROOM \
62 EMAC_DMA_ALIGN(CONFIG_IBM_EMAC_RX_SKB_HEADROOM)
63
64/* Size of RX skb for the given MTU */
65static inline int emac_rx_skb_size(int mtu)
66{
67 int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu));
68 return EMAC_DMA_ALIGN(size + 2) + EMAC_RX_SKB_HEADROOM;
69}
70
71/* RX DMA sync size */
72static inline int emac_rx_sync_size(int mtu)
73{
74 return EMAC_DMA_ALIGN(emac_rx_size(mtu) + 2);
75}
76
77/* Driver statistcs is split into two parts to make it more cache friendly:
78 * - normal statistics (packet count, etc)
79 * - error statistics
80 *
81 * When statistics is requested by ethtool, these parts are concatenated,
82 * normal one goes first.
83 *
84 * Please, keep these structures in sync with emac_stats_keys.
61 */ 85 */
62#define MAX_NUM_BUF_DESC 255 86
63#define DESC_BUF_SIZE 4080 /* max 4096-16 */ 87/* Normal TX/RX Statistics */
64#define DESC_BUF_SIZE_REG (DESC_BUF_SIZE / 16) 88struct ibm_emac_stats {
65 89 u64 rx_packets;
66/* Transmitter timeout. */ 90 u64 rx_bytes;
67#define TX_TIMEOUT (2*HZ) 91 u64 tx_packets;
68 92 u64 tx_bytes;
69/* MDIO latency delay */ 93 u64 rx_packets_csum;
70#define MDIO_DELAY 250 94 u64 tx_packets_csum;
71 95};
72/* Power managment shift registers */ 96
73#define IBM_CPM_EMMII 0 /* Shift value for MII */ 97/* Error statistics */
74#define IBM_CPM_EMRX 1 /* Shift value for recv */ 98struct ibm_emac_error_stats {
75#define IBM_CPM_EMTX 2 /* Shift value for MAC */ 99 u64 tx_undo;
76#define IBM_CPM_EMAC(x) (((x)>>IBM_CPM_EMMII) | ((x)>>IBM_CPM_EMRX) | ((x)>>IBM_CPM_EMTX)) 100
77 101 /* Software RX Errors */
78#define ENET_HEADER_SIZE 14 102 u64 rx_dropped_stack;
79#define ENET_FCS_SIZE 4 103 u64 rx_dropped_oom;
80#define ENET_DEF_MTU_SIZE 1500 104 u64 rx_dropped_error;
81#define ENET_DEF_BUF_SIZE (ENET_DEF_MTU_SIZE + ENET_HEADER_SIZE + ENET_FCS_SIZE) 105 u64 rx_dropped_resize;
82#define EMAC_MIN_FRAME 64 106 u64 rx_dropped_mtu;
83#define EMAC_MAX_FRAME 9018 107 u64 rx_stopped;
84#define EMAC_MIN_MTU (EMAC_MIN_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) 108 /* BD reported RX errors */
85#define EMAC_MAX_MTU (EMAC_MAX_FRAME - ENET_HEADER_SIZE - ENET_FCS_SIZE) 109 u64 rx_bd_errors;
86 110 u64 rx_bd_overrun;
87#ifdef CONFIG_IBM_EMAC_ERRMSG 111 u64 rx_bd_bad_packet;
88void emac_serr_dump_0(struct net_device *dev); 112 u64 rx_bd_runt_packet;
89void emac_serr_dump_1(struct net_device *dev); 113 u64 rx_bd_short_event;
90void emac_err_dump(struct net_device *dev, int em0isr); 114 u64 rx_bd_alignment_error;
91void emac_phy_dump(struct net_device *); 115 u64 rx_bd_bad_fcs;
92void emac_desc_dump(struct net_device *); 116 u64 rx_bd_packet_too_long;
93void emac_mac_dump(struct net_device *); 117 u64 rx_bd_out_of_range;
94void emac_mal_dump(struct net_device *); 118 u64 rx_bd_in_range;
95#else 119 /* EMAC IRQ reported RX errors */
96#define emac_serr_dump_0(dev) do { } while (0) 120 u64 rx_parity;
97#define emac_serr_dump_1(dev) do { } while (0) 121 u64 rx_fifo_overrun;
98#define emac_err_dump(dev,x) do { } while (0) 122 u64 rx_overrun;
99#define emac_phy_dump(dev) do { } while (0) 123 u64 rx_bad_packet;
100#define emac_desc_dump(dev) do { } while (0) 124 u64 rx_runt_packet;
101#define emac_mac_dump(dev) do { } while (0) 125 u64 rx_short_event;
102#define emac_mal_dump(dev) do { } while (0) 126 u64 rx_alignment_error;
103#endif 127 u64 rx_bad_fcs;
128 u64 rx_packet_too_long;
129 u64 rx_out_of_range;
130 u64 rx_in_range;
131
132 /* Software TX Errors */
133 u64 tx_dropped;
134 /* BD reported TX errors */
135 u64 tx_bd_errors;
136 u64 tx_bd_bad_fcs;
137 u64 tx_bd_carrier_loss;
138 u64 tx_bd_excessive_deferral;
139 u64 tx_bd_excessive_collisions;
140 u64 tx_bd_late_collision;
141 u64 tx_bd_multple_collisions;
142 u64 tx_bd_single_collision;
143 u64 tx_bd_underrun;
144 u64 tx_bd_sqe;
145 /* EMAC IRQ reported TX errors */
146 u64 tx_parity;
147 u64 tx_underrun;
148 u64 tx_sqe;
149 u64 tx_errors;
150};
151
152#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct ibm_emac_stats) + \
153 sizeof(struct ibm_emac_error_stats)) \
154 / sizeof(u64))
104 155
105struct ocp_enet_private { 156struct ocp_enet_private {
106 struct sk_buff *tx_skb[NUM_TX_BUFF]; 157 struct net_device *ndev; /* 0 */
107 struct sk_buff *rx_skb[NUM_RX_BUFF]; 158 struct emac_regs *emacp;
108 struct mal_descriptor *tx_desc; 159
109 struct mal_descriptor *rx_desc; 160 struct mal_descriptor *tx_desc;
110 struct mal_descriptor *rx_dirty; 161 int tx_cnt;
111 struct net_device_stats stats; 162 int tx_slot;
112 int tx_cnt; 163 int ack_slot;
113 int rx_slot; 164
114 int dirty_rx; 165 struct mal_descriptor *rx_desc;
115 int tx_slot; 166 int rx_slot;
116 int ack_slot; 167 struct sk_buff *rx_sg_skb; /* 1 */
117 int rx_buffer_size; 168 int rx_skb_size;
118 169 int rx_sync_size;
119 struct mii_phy phy_mii; 170
120 int mii_phy_addr; 171 struct ibm_emac_stats stats;
121 int want_autoneg; 172 struct ocp_device *tah_dev;
122 int timer_ticks; 173
123 struct timer_list link_timer; 174 struct ibm_ocp_mal *mal;
124 struct net_device *mdio_dev; 175 struct mal_commac commac;
125 176
126 struct ocp_device *rgmii_dev; 177 struct sk_buff *tx_skb[NUM_TX_BUFF];
127 int rgmii_input; 178 struct sk_buff *rx_skb[NUM_RX_BUFF];
128 179
129 struct ocp_device *zmii_dev; 180 struct ocp_device *zmii_dev;
130 int zmii_input; 181 int zmii_input;
131 182 struct ocp_enet_private *mdio_dev;
132 struct ibm_ocp_mal *mal; 183 struct ocp_device *rgmii_dev;
133 int mal_tx_chan, mal_rx_chan; 184 int rgmii_input;
134 struct mal_commac commac; 185
135 186 struct ocp_def *def;
136 struct ocp_device *tah_dev; 187
137 188 struct mii_phy phy;
138 int opened; 189 struct timer_list link_timer;
139 int going_away; 190 int reset_failed;
140 int wol_irq; 191
141 emac_t *emacp; 192 struct ibm_emac_error_stats estats;
142 struct ocp_device *ocpdev; 193 struct net_device_stats nstats;
143 struct net_device *ndev; 194
144 spinlock_t lock; 195 struct device* ldev;
145}; 196};
146#endif /* _IBM_EMAC_CORE_H_ */ 197
198/* Ethtool get_regs complex data.
199 * We want to get not just EMAC registers, but also MAL, ZMII, RGMII, TAH
200 * when available.
201 *
202 * Returned BLOB consists of the ibm_emac_ethtool_regs_hdr,
203 * MAL registers, EMAC registers and optional ZMII, RGMII, TAH registers.
204 * Each register component is preceded with emac_ethtool_regs_subhdr.
205 * Order of the optional headers follows their relative bit posititions
206 * in emac_ethtool_regs_hdr.components
207 */
208#define EMAC_ETHTOOL_REGS_ZMII 0x00000001
209#define EMAC_ETHTOOL_REGS_RGMII 0x00000002
210#define EMAC_ETHTOOL_REGS_TAH 0x00000004
211
212struct emac_ethtool_regs_hdr {
213 u32 components;
214};
215
216struct emac_ethtool_regs_subhdr {
217 u32 version;
218 u32 index;
219};
220
221#endif /* __IBM_EMAC_CORE_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.c b/drivers/net/ibm_emac/ibm_emac_debug.c
index c8512046cf84..75d3b8639041 100644
--- a/drivers/net/ibm_emac/ibm_emac_debug.c
+++ b/drivers/net/ibm_emac/ibm_emac_debug.c
@@ -1,224 +1,213 @@
1/* 1/*
2 * ibm_ocp_debug.c 2 * drivers/net/ibm_emac/ibm_emac_debug.c
3 * 3 *
4 * This has all the debug routines that where in *_enet.c 4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 * 5 *
6 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies
7 * April , 2002 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Copyright 2002 MontaVista Softare Inc.
10 * 8 *
11 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your 11 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version. 12 * option) any later version.
13 *
15 */ 14 */
16
17#include <linux/config.h> 15#include <linux/config.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/sysrq.h>
20#include <asm/io.h> 21#include <asm/io.h>
21#include "ibm_ocp_mal.h"
22#include "ibm_ocp_zmii.h"
23#include "ibm_ocp_enet.h"
24 22
25extern int emac_phy_read(struct net_device *dev, int mii_id, int reg); 23#include "ibm_emac_core.h"
24
25static void emac_desc_dump(int idx, struct ocp_enet_private *p)
26{
27 int i;
28 printk("** EMAC%d TX BDs **\n"
29 " tx_cnt = %d tx_slot = %d ack_slot = %d\n",
30 idx, p->tx_cnt, p->tx_slot, p->ack_slot);
31 for (i = 0; i < NUM_TX_BUFF / 2; ++i)
32 printk
33 ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
34 i, p->tx_desc[i].data_ptr, p->tx_skb[i] ? 'V' : ' ',
35 p->tx_desc[i].ctrl, p->tx_desc[i].data_len,
36 NUM_TX_BUFF / 2 + i,
37 p->tx_desc[NUM_TX_BUFF / 2 + i].data_ptr,
38 p->tx_skb[NUM_TX_BUFF / 2 + i] ? 'V' : ' ',
39 p->tx_desc[NUM_TX_BUFF / 2 + i].ctrl,
40 p->tx_desc[NUM_TX_BUFF / 2 + i].data_len);
41
42 printk("** EMAC%d RX BDs **\n"
43 " rx_slot = %d rx_stopped = %d rx_skb_size = %d rx_sync_size = %d\n"
44 " rx_sg_skb = 0x%p\n",
45 idx, p->rx_slot, p->commac.rx_stopped, p->rx_skb_size,
46 p->rx_sync_size, p->rx_sg_skb);
47 for (i = 0; i < NUM_RX_BUFF / 2; ++i)
48 printk
49 ("bd[%2d] 0x%08x %c 0x%04x %4u - bd[%2d] 0x%08x %c 0x%04x %4u\n",
50 i, p->rx_desc[i].data_ptr, p->rx_skb[i] ? 'V' : ' ',
51 p->rx_desc[i].ctrl, p->rx_desc[i].data_len,
52 NUM_RX_BUFF / 2 + i,
53 p->rx_desc[NUM_RX_BUFF / 2 + i].data_ptr,
54 p->rx_skb[NUM_RX_BUFF / 2 + i] ? 'V' : ' ',
55 p->rx_desc[NUM_RX_BUFF / 2 + i].ctrl,
56 p->rx_desc[NUM_RX_BUFF / 2 + i].data_len);
57}
58
59static void emac_mac_dump(int idx, struct ocp_enet_private *dev)
60{
61 struct emac_regs *p = dev->emacp;
62
63 printk("** EMAC%d registers **\n"
64 "MR0 = 0x%08x MR1 = 0x%08x TMR0 = 0x%08x TMR1 = 0x%08x\n"
65 "RMR = 0x%08x ISR = 0x%08x ISER = 0x%08x\n"
66 "IAR = %04x%08x VTPID = 0x%04x VTCI = 0x%04x\n"
67 "IAHT: 0x%04x 0x%04x 0x%04x 0x%04x "
68 "GAHT: 0x%04x 0x%04x 0x%04x 0x%04x\n"
69 "LSA = %04x%08x IPGVR = 0x%04x\n"
70 "STACR = 0x%08x TRTR = 0x%08x RWMR = 0x%08x\n"
71 "OCTX = 0x%08x OCRX = 0x%08x IPCR = 0x%08x\n",
72 idx, in_be32(&p->mr0), in_be32(&p->mr1),
73 in_be32(&p->tmr0), in_be32(&p->tmr1),
74 in_be32(&p->rmr), in_be32(&p->isr), in_be32(&p->iser),
75 in_be32(&p->iahr), in_be32(&p->ialr), in_be32(&p->vtpid),
76 in_be32(&p->vtci),
77 in_be32(&p->iaht1), in_be32(&p->iaht2), in_be32(&p->iaht3),
78 in_be32(&p->iaht4),
79 in_be32(&p->gaht1), in_be32(&p->gaht2), in_be32(&p->gaht3),
80 in_be32(&p->gaht4),
81 in_be32(&p->lsah), in_be32(&p->lsal), in_be32(&p->ipgvr),
82 in_be32(&p->stacr), in_be32(&p->trtr), in_be32(&p->rwmr),
83 in_be32(&p->octx), in_be32(&p->ocrx), in_be32(&p->ipcr)
84 );
85
86 emac_desc_dump(idx, dev);
87}
88
89static void emac_mal_dump(struct ibm_ocp_mal *mal)
90{
91 struct ocp_func_mal_data *maldata = mal->def->additions;
92 int i;
93
94 printk("** MAL%d Registers **\n"
95 "CFG = 0x%08x ESR = 0x%08x IER = 0x%08x\n"
96 "TX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n"
97 "RX|CASR = 0x%08x CARR = 0x%08x EOBISR = 0x%08x DEIR = 0x%08x\n",
98 mal->def->index,
99 get_mal_dcrn(mal, MAL_CFG), get_mal_dcrn(mal, MAL_ESR),
100 get_mal_dcrn(mal, MAL_IER),
101 get_mal_dcrn(mal, MAL_TXCASR), get_mal_dcrn(mal, MAL_TXCARR),
102 get_mal_dcrn(mal, MAL_TXEOBISR), get_mal_dcrn(mal, MAL_TXDEIR),
103 get_mal_dcrn(mal, MAL_RXCASR), get_mal_dcrn(mal, MAL_RXCARR),
104 get_mal_dcrn(mal, MAL_RXEOBISR), get_mal_dcrn(mal, MAL_RXDEIR)
105 );
106
107 printk("TX|");
108 for (i = 0; i < maldata->num_tx_chans; ++i) {
109 if (i && !(i % 4))
110 printk("\n ");
111 printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_TXCTPR(i)));
112 }
113 printk("\nRX|");
114 for (i = 0; i < maldata->num_rx_chans; ++i) {
115 if (i && !(i % 4))
116 printk("\n ");
117 printk("CTP%d = 0x%08x ", i, get_mal_dcrn(mal, MAL_RXCTPR(i)));
118 }
119 printk("\n ");
120 for (i = 0; i < maldata->num_rx_chans; ++i) {
121 u32 r = get_mal_dcrn(mal, MAL_RCBS(i));
122 if (i && !(i % 3))
123 printk("\n ");
124 printk("RCBS%d = 0x%08x (%d) ", i, r, r * 16);
125 }
126 printk("\n");
127}
128
129static struct ocp_enet_private *__emacs[4];
130static struct ibm_ocp_mal *__mals[1];
26 131
27void emac_phy_dump(struct net_device *dev) 132void emac_dbg_register(int idx, struct ocp_enet_private *dev)
28{ 133{
29 struct ocp_enet_private *fep = dev->priv; 134 unsigned long flags;
30 unsigned long i; 135
31 uint data; 136 if (idx >= sizeof(__emacs) / sizeof(__emacs[0])) {
32 137 printk(KERN_WARNING
33 printk(KERN_DEBUG " Prepare for Phy dump....\n"); 138 "invalid index %d when registering EMAC for debugging\n",
34 for (i = 0; i < 0x1A; i++) { 139 idx);
35 data = emac_phy_read(dev, fep->mii_phy_addr, i); 140 return;
36 printk(KERN_DEBUG "Phy reg 0x%lx ==> %4x\n", i, data);
37 if (i == 0x07)
38 i = 0x0f;
39 } 141 }
142
143 local_irq_save(flags);
144 __emacs[idx] = dev;
145 local_irq_restore(flags);
40} 146}
41 147
42void emac_desc_dump(struct net_device *dev) 148void mal_dbg_register(int idx, struct ibm_ocp_mal *mal)
43{ 149{
44 struct ocp_enet_private *fep = dev->priv; 150 unsigned long flags;
45 int curr_slot; 151
46 152 if (idx >= sizeof(__mals) / sizeof(__mals[0])) {
47 printk(KERN_DEBUG 153 printk(KERN_WARNING
48 "dumping the receive descriptors: current slot is %d\n", 154 "invalid index %d when registering MAL for debugging\n",
49 fep->rx_slot); 155 idx);
50 for (curr_slot = 0; curr_slot < NUM_RX_BUFF; curr_slot++) { 156 return;
51 printk(KERN_DEBUG
52 "Desc %02d: status 0x%04x, length %3d, addr 0x%x\n",
53 curr_slot, fep->rx_desc[curr_slot].ctrl,
54 fep->rx_desc[curr_slot].data_len,
55 (unsigned int)fep->rx_desc[curr_slot].data_ptr);
56 } 157 }
158
159 local_irq_save(flags);
160 __mals[idx] = mal;
161 local_irq_restore(flags);
57} 162}
58 163
59void emac_mac_dump(struct net_device *dev) 164void emac_dbg_dump_all(void)
60{ 165{
61 struct ocp_enet_private *fep = dev->priv; 166 unsigned int i;
62 volatile emac_t *emacp = fep->emacp; 167 unsigned long flags;
63 168
64 printk(KERN_DEBUG "EMAC DEBUG ********** \n"); 169 local_irq_save(flags);
65 printk(KERN_DEBUG "EMAC_M0 ==> 0x%x\n", in_be32(&emacp->em0mr0)); 170
66 printk(KERN_DEBUG "EMAC_M1 ==> 0x%x\n", in_be32(&emacp->em0mr1)); 171 for (i = 0; i < sizeof(__mals) / sizeof(__mals[0]); ++i)
67 printk(KERN_DEBUG "EMAC_TXM0==> 0x%x\n", in_be32(&emacp->em0tmr0)); 172 if (__mals[i])
68 printk(KERN_DEBUG "EMAC_TXM1==> 0x%x\n", in_be32(&emacp->em0tmr1)); 173 emac_mal_dump(__mals[i]);
69 printk(KERN_DEBUG "EMAC_RXM ==> 0x%x\n", in_be32(&emacp->em0rmr)); 174
70 printk(KERN_DEBUG "EMAC_ISR ==> 0x%x\n", in_be32(&emacp->em0isr)); 175 for (i = 0; i < sizeof(__emacs) / sizeof(__emacs[0]); ++i)
71 printk(KERN_DEBUG "EMAC_IER ==> 0x%x\n", in_be32(&emacp->em0iser)); 176 if (__emacs[i])
72 printk(KERN_DEBUG "EMAC_IAH ==> 0x%x\n", in_be32(&emacp->em0iahr)); 177 emac_mac_dump(i, __emacs[i]);
73 printk(KERN_DEBUG "EMAC_IAL ==> 0x%x\n", in_be32(&emacp->em0ialr)); 178
74 printk(KERN_DEBUG "EMAC_VLAN_TPID_REG ==> 0x%x\n", 179 local_irq_restore(flags);
75 in_be32(&emacp->em0vtpid));
76} 180}
77 181
78void emac_mal_dump(struct net_device *dev) 182#if defined(CONFIG_MAGIC_SYSRQ)
183static void emac_sysrq_handler(int key, struct pt_regs *pt_regs,
184 struct tty_struct *tty)
79{ 185{
80 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 186 emac_dbg_dump_all();
81
82 printk(KERN_DEBUG " MAL DEBUG ********** \n");
83 printk(KERN_DEBUG " MCR ==> 0x%x\n",
84 (unsigned int)get_mal_dcrn(mal, DCRN_MALCR));
85 printk(KERN_DEBUG " ESR ==> 0x%x\n",
86 (unsigned int)get_mal_dcrn(mal, DCRN_MALESR));
87 printk(KERN_DEBUG " IER ==> 0x%x\n",
88 (unsigned int)get_mal_dcrn(mal, DCRN_MALIER));
89#ifdef CONFIG_40x
90 printk(KERN_DEBUG " DBR ==> 0x%x\n",
91 (unsigned int)get_mal_dcrn(mal, DCRN_MALDBR));
92#endif /* CONFIG_40x */
93 printk(KERN_DEBUG " TXCASR ==> 0x%x\n",
94 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCASR));
95 printk(KERN_DEBUG " TXCARR ==> 0x%x\n",
96 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCARR));
97 printk(KERN_DEBUG " TXEOBISR ==> 0x%x\n",
98 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXEOBISR));
99 printk(KERN_DEBUG " TXDEIR ==> 0x%x\n",
100 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXDEIR));
101 printk(KERN_DEBUG " RXCASR ==> 0x%x\n",
102 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCASR));
103 printk(KERN_DEBUG " RXCARR ==> 0x%x\n",
104 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCARR));
105 printk(KERN_DEBUG " RXEOBISR ==> 0x%x\n",
106 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXEOBISR));
107 printk(KERN_DEBUG " RXDEIR ==> 0x%x\n",
108 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXDEIR));
109 printk(KERN_DEBUG " TXCTP0R ==> 0x%x\n",
110 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP0R));
111 printk(KERN_DEBUG " TXCTP1R ==> 0x%x\n",
112 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP1R));
113 printk(KERN_DEBUG " TXCTP2R ==> 0x%x\n",
114 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP2R));
115 printk(KERN_DEBUG " TXCTP3R ==> 0x%x\n",
116 (unsigned int)get_mal_dcrn(mal, DCRN_MALTXCTP3R));
117 printk(KERN_DEBUG " RXCTP0R ==> 0x%x\n",
118 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP0R));
119 printk(KERN_DEBUG " RXCTP1R ==> 0x%x\n",
120 (unsigned int)get_mal_dcrn(mal, DCRN_MALRXCTP1R));
121 printk(KERN_DEBUG " RCBS0 ==> 0x%x\n",
122 (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS0));
123 printk(KERN_DEBUG " RCBS1 ==> 0x%x\n",
124 (unsigned int)get_mal_dcrn(mal, DCRN_MALRCBS1));
125} 187}
126 188
127void emac_serr_dump_0(struct net_device *dev) 189static struct sysrq_key_op emac_sysrq_op = {
190 .handler = emac_sysrq_handler,
191 .help_msg = "emaC",
192 .action_msg = "Show EMAC(s) status",
193};
194
195int __init emac_init_debug(void)
128{ 196{
129 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 197 return register_sysrq_key('c', &emac_sysrq_op);
130 unsigned long int mal_error, plb_error, plb_addr;
131
132 mal_error = get_mal_dcrn(mal, DCRN_MALESR);
133 printk(KERN_DEBUG "ppc405_eth_serr: %s channel %ld \n",
134 (mal_error & 0x40000000) ? "Receive" :
135 "Transmit", (mal_error & 0x3e000000) >> 25);
136 printk(KERN_DEBUG " ----- latched error -----\n");
137 if (mal_error & MALESR_DE)
138 printk(KERN_DEBUG " DE: descriptor error\n");
139 if (mal_error & MALESR_OEN)
140 printk(KERN_DEBUG " ONE: OPB non-fullword error\n");
141 if (mal_error & MALESR_OTE)
142 printk(KERN_DEBUG " OTE: OPB timeout error\n");
143 if (mal_error & MALESR_OSE)
144 printk(KERN_DEBUG " OSE: OPB slave error\n");
145
146 if (mal_error & MALESR_PEIN) {
147 plb_error = mfdcr(DCRN_PLB0_BESR);
148 printk(KERN_DEBUG
149 " PEIN: PLB error, PLB0_BESR is 0x%x\n",
150 (unsigned int)plb_error);
151 plb_addr = mfdcr(DCRN_PLB0_BEAR);
152 printk(KERN_DEBUG
153 " PEIN: PLB error, PLB0_BEAR is 0x%x\n",
154 (unsigned int)plb_addr);
155 }
156} 198}
157 199
158void emac_serr_dump_1(struct net_device *dev) 200void __exit emac_fini_debug(void)
159{ 201{
160 struct ibm_ocp_mal *mal = ((struct ocp_enet_private *)dev->priv)->mal; 202 unregister_sysrq_key('c', &emac_sysrq_op);
161 int mal_error = get_mal_dcrn(mal, DCRN_MALESR);
162
163 printk(KERN_DEBUG " ----- cumulative errors -----\n");
164 if (mal_error & MALESR_DEI)
165 printk(KERN_DEBUG " DEI: descriptor error interrupt\n");
166 if (mal_error & MALESR_ONEI)
167 printk(KERN_DEBUG " OPB non-fullword error interrupt\n");
168 if (mal_error & MALESR_OTEI)
169 printk(KERN_DEBUG " OTEI: timeout error interrupt\n");
170 if (mal_error & MALESR_OSEI)
171 printk(KERN_DEBUG " OSEI: slave error interrupt\n");
172 if (mal_error & MALESR_PBEI)
173 printk(KERN_DEBUG " PBEI: PLB bus error interrupt\n");
174} 203}
175 204
176void emac_err_dump(struct net_device *dev, int em0isr) 205#else
206int __init emac_init_debug(void)
207{
208 return 0;
209}
210void __exit emac_fini_debug(void)
177{ 211{
178 printk(KERN_DEBUG "%s: on-chip ethernet error:\n", dev->name);
179
180 if (em0isr & EMAC_ISR_OVR)
181 printk(KERN_DEBUG " OVR: overrun\n");
182 if (em0isr & EMAC_ISR_PP)
183 printk(KERN_DEBUG " PP: control pause packet\n");
184 if (em0isr & EMAC_ISR_BP)
185 printk(KERN_DEBUG " BP: packet error\n");
186 if (em0isr & EMAC_ISR_RP)
187 printk(KERN_DEBUG " RP: runt packet\n");
188 if (em0isr & EMAC_ISR_SE)
189 printk(KERN_DEBUG " SE: short event\n");
190 if (em0isr & EMAC_ISR_ALE)
191 printk(KERN_DEBUG " ALE: odd number of nibbles in packet\n");
192 if (em0isr & EMAC_ISR_BFCS)
193 printk(KERN_DEBUG " BFCS: bad FCS\n");
194 if (em0isr & EMAC_ISR_PTLE)
195 printk(KERN_DEBUG " PTLE: oversized packet\n");
196 if (em0isr & EMAC_ISR_ORE)
197 printk(KERN_DEBUG
198 " ORE: packet length field > max allowed LLC\n");
199 if (em0isr & EMAC_ISR_IRE)
200 printk(KERN_DEBUG " IRE: In Range error\n");
201 if (em0isr & EMAC_ISR_DBDM)
202 printk(KERN_DEBUG " DBDM: xmit error or SQE\n");
203 if (em0isr & EMAC_ISR_DB0)
204 printk(KERN_DEBUG " DB0: xmit error or SQE on TX channel 0\n");
205 if (em0isr & EMAC_ISR_SE0)
206 printk(KERN_DEBUG
207 " SE0: Signal Quality Error test failure from TX channel 0\n");
208 if (em0isr & EMAC_ISR_TE0)
209 printk(KERN_DEBUG " TE0: xmit channel 0 aborted\n");
210 if (em0isr & EMAC_ISR_DB1)
211 printk(KERN_DEBUG " DB1: xmit error or SQE on TX channel \n");
212 if (em0isr & EMAC_ISR_SE1)
213 printk(KERN_DEBUG
214 " SE1: Signal Quality Error test failure from TX channel 1\n");
215 if (em0isr & EMAC_ISR_TE1)
216 printk(KERN_DEBUG " TE1: xmit channel 1 aborted\n");
217 if (em0isr & EMAC_ISR_MOS)
218 printk(KERN_DEBUG " MOS\n");
219 if (em0isr & EMAC_ISR_MOF)
220 printk(KERN_DEBUG " MOF\n");
221
222 emac_mac_dump(dev);
223 emac_mal_dump(dev);
224} 212}
213#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/drivers/net/ibm_emac/ibm_emac_debug.h b/drivers/net/ibm_emac/ibm_emac_debug.h
new file mode 100644
index 000000000000..e85fbe0a8da9
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_debug.h
@@ -0,0 +1,63 @@
1/*
2 * drivers/net/ibm_emac/ibm_ocp_debug.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, debug print routines.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 */
15#ifndef __IBM_EMAC_DEBUG_H_
16#define __IBM_EMAC_DEBUG_H_
17
18#include <linux/config.h>
19#include <linux/init.h>
20#include "ibm_emac_core.h"
21#include "ibm_emac_mal.h"
22
23#if defined(CONFIG_IBM_EMAC_DEBUG)
24void emac_dbg_register(int idx, struct ocp_enet_private *dev);
25void mal_dbg_register(int idx, struct ibm_ocp_mal *mal);
26int emac_init_debug(void) __init;
27void emac_fini_debug(void) __exit;
28void emac_dbg_dump_all(void);
29# define DBG_LEVEL 1
30#else
31# define emac_dbg_register(x,y) ((void)0)
32# define mal_dbg_register(x,y) ((void)0)
33# define emac_init_debug() ((void)0)
34# define emac_fini_debug() ((void)0)
35# define emac_dbg_dump_all() ((void)0)
36# define DBG_LEVEL 0
37#endif
38
39#if DBG_LEVEL > 0
40# define DBG(f,x...) printk("emac" f, ##x)
41# define MAL_DBG(f,x...) printk("mal" f, ##x)
42# define ZMII_DBG(f,x...) printk("zmii" f, ##x)
43# define RGMII_DBG(f,x...) printk("rgmii" f, ##x)
44# define NL "\n"
45#else
46# define DBG(f,x...) ((void)0)
47# define MAL_DBG(f,x...) ((void)0)
48# define ZMII_DBG(f,x...) ((void)0)
49# define RGMII_DBG(f,x...) ((void)0)
50#endif
51#if DBG_LEVEL > 1
52# define DBG2(f,x...) DBG(f, ##x)
53# define MAL_DBG2(f,x...) MAL_DBG(f, ##x)
54# define ZMII_DBG2(f,x...) ZMII_DBG(f, ##x)
55# define RGMII_DBG2(f,x...) RGMII_DBG(f, ##x)
56#else
57# define DBG2(f,x...) ((void)0)
58# define MAL_DBG2(f,x...) ((void)0)
59# define ZMII_DBG2(f,x...) ((void)0)
60# define RGMII_DBG2(f,x...) ((void)0)
61#endif
62
63#endif /* __IBM_EMAC_DEBUG_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c
index e59f57f363ca..da88d43081cc 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.c
+++ b/drivers/net/ibm_emac/ibm_emac_mal.c
@@ -1,436 +1,565 @@
1/* 1/*
2 * ibm_ocp_mal.c 2 * drivers/net/ibm_emac/ibm_emac_mal.c
3 * 3 *
4 * Armin Kuster akuster@mvista.com 4 * Memory Access Layer (MAL) support
5 * Juen, 2002 5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
6 * 8 *
7 * Copyright 2002 MontaVista Softare Inc. 9 * Based on original work by
10 * Benjamin Herrenschmidt <benh@kernel.crashing.org>,
11 * David Gibson <hermes@gibson.dropbear.id.au>,
12 *
13 * Armin Kuster <akuster@mvista.com>
14 * Copyright 2002 MontaVista Softare Inc.
8 * 15 *
9 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your 18 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. 19 * option) any later version.
20 *
13 */ 21 */
14
15#include <linux/config.h> 22#include <linux/config.h>
16#include <linux/module.h> 23#include <linux/module.h>
17#include <linux/kernel.h> 24#include <linux/kernel.h>
18#include <linux/errno.h> 25#include <linux/errno.h>
19#include <linux/netdevice.h> 26#include <linux/netdevice.h>
20#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/interrupt.h>
21#include <linux/dma-mapping.h> 29#include <linux/dma-mapping.h>
22 30
23#include <asm/io.h>
24#include <asm/irq.h>
25#include <asm/ocp.h> 31#include <asm/ocp.h>
26 32
33#include "ibm_emac_core.h"
27#include "ibm_emac_mal.h" 34#include "ibm_emac_mal.h"
35#include "ibm_emac_debug.h"
28 36
29// Locking: Should we share a lock with the client ? The client could provide 37int __init mal_register_commac(struct ibm_ocp_mal *mal,
30// a lock pointer (optionally) in the commac structure... I don't think this is 38 struct mal_commac *commac)
31// really necessary though
32
33/* This lock protects the commac list. On today UP implementations, it's
34 * really only used as IRQ protection in mal_{register,unregister}_commac()
35 */
36static DEFINE_RWLOCK(mal_list_lock);
37
38int mal_register_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac)
39{ 39{
40 unsigned long flags; 40 unsigned long flags;
41 local_irq_save(flags);
41 42
42 write_lock_irqsave(&mal_list_lock, flags); 43 MAL_DBG("%d: reg(%08x, %08x)" NL, mal->def->index,
44 commac->tx_chan_mask, commac->rx_chan_mask);
43 45
44 /* Don't let multiple commacs claim the same channel */ 46 /* Don't let multiple commacs claim the same channel(s) */
45 if ((mal->tx_chan_mask & commac->tx_chan_mask) || 47 if ((mal->tx_chan_mask & commac->tx_chan_mask) ||
46 (mal->rx_chan_mask & commac->rx_chan_mask)) { 48 (mal->rx_chan_mask & commac->rx_chan_mask)) {
47 write_unlock_irqrestore(&mal_list_lock, flags); 49 local_irq_restore(flags);
50 printk(KERN_WARNING "mal%d: COMMAC channels conflict!\n",
51 mal->def->index);
48 return -EBUSY; 52 return -EBUSY;
49 } 53 }
50 54
51 mal->tx_chan_mask |= commac->tx_chan_mask; 55 mal->tx_chan_mask |= commac->tx_chan_mask;
52 mal->rx_chan_mask |= commac->rx_chan_mask; 56 mal->rx_chan_mask |= commac->rx_chan_mask;
57 list_add(&commac->list, &mal->list);
53 58
54 list_add(&commac->list, &mal->commac); 59 local_irq_restore(flags);
55
56 write_unlock_irqrestore(&mal_list_lock, flags);
57
58 return 0; 60 return 0;
59} 61}
60 62
61int mal_unregister_commac(struct ibm_ocp_mal *mal, struct mal_commac *commac) 63void __exit mal_unregister_commac(struct ibm_ocp_mal *mal,
64 struct mal_commac *commac)
62{ 65{
63 unsigned long flags; 66 unsigned long flags;
67 local_irq_save(flags);
64 68
65 write_lock_irqsave(&mal_list_lock, flags); 69 MAL_DBG("%d: unreg(%08x, %08x)" NL, mal->def->index,
70 commac->tx_chan_mask, commac->rx_chan_mask);
66 71
67 mal->tx_chan_mask &= ~commac->tx_chan_mask; 72 mal->tx_chan_mask &= ~commac->tx_chan_mask;
68 mal->rx_chan_mask &= ~commac->rx_chan_mask; 73 mal->rx_chan_mask &= ~commac->rx_chan_mask;
69
70 list_del_init(&commac->list); 74 list_del_init(&commac->list);
71 75
72 write_unlock_irqrestore(&mal_list_lock, flags); 76 local_irq_restore(flags);
73
74 return 0;
75} 77}
76 78
77int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size) 79int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size)
78{ 80{
79 switch (channel) { 81 struct ocp_func_mal_data *maldata = mal->def->additions;
80 case 0: 82 BUG_ON(channel < 0 || channel >= maldata->num_rx_chans ||
81 set_mal_dcrn(mal, DCRN_MALRCBS0, size); 83 size > MAL_MAX_RX_SIZE);
82 break; 84
83#ifdef DCRN_MALRCBS1 85 MAL_DBG("%d: set_rbcs(%d, %lu)" NL, mal->def->index, channel, size);
84 case 1: 86
85 set_mal_dcrn(mal, DCRN_MALRCBS1, size); 87 if (size & 0xf) {
86 break; 88 printk(KERN_WARNING
87#endif 89 "mal%d: incorrect RX size %lu for the channel %d\n",
88#ifdef DCRN_MALRCBS2 90 mal->def->index, size, channel);
89 case 2:
90 set_mal_dcrn(mal, DCRN_MALRCBS2, size);
91 break;
92#endif
93#ifdef DCRN_MALRCBS3
94 case 3:
95 set_mal_dcrn(mal, DCRN_MALRCBS3, size);
96 break;
97#endif
98 default:
99 return -EINVAL; 91 return -EINVAL;
100 } 92 }
101 93
94 set_mal_dcrn(mal, MAL_RCBS(channel), size >> 4);
102 return 0; 95 return 0;
103} 96}
104 97
105static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs) 98int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel)
106{ 99{
107 struct ibm_ocp_mal *mal = dev_instance; 100 struct ocp_func_mal_data *maldata = mal->def->additions;
108 unsigned long mal_error; 101 BUG_ON(channel < 0 || channel >= maldata->num_tx_chans);
102 return channel * NUM_TX_BUFF;
103}
109 104
110 /* 105int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel)
111 * This SERR applies to one of the devices on the MAL, here we charge 106{
112 * it against the first EMAC registered for the MAL. 107 struct ocp_func_mal_data *maldata = mal->def->additions;
113 */ 108 BUG_ON(channel < 0 || channel >= maldata->num_rx_chans);
109 return maldata->num_tx_chans * NUM_TX_BUFF + channel * NUM_RX_BUFF;
110}
114 111
115 mal_error = get_mal_dcrn(mal, DCRN_MALESR); 112void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel)
113{
114 local_bh_disable();
115 MAL_DBG("%d: enable_tx(%d)" NL, mal->def->index, channel);
116 set_mal_dcrn(mal, MAL_TXCASR,
117 get_mal_dcrn(mal, MAL_TXCASR) | MAL_CHAN_MASK(channel));
118 local_bh_enable();
119}
116 120
117 printk(KERN_ERR "%s: System Error (MALESR=%lx)\n", 121void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel)
118 "MAL" /* FIXME: get the name right */ , mal_error); 122{
123 set_mal_dcrn(mal, MAL_TXCARR, MAL_CHAN_MASK(channel));
124 MAL_DBG("%d: disable_tx(%d)" NL, mal->def->index, channel);
125}
119 126
120 /* FIXME: decipher error */ 127void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel)
121 /* DIXME: distribute to commacs, if possible */ 128{
129 local_bh_disable();
130 MAL_DBG("%d: enable_rx(%d)" NL, mal->def->index, channel);
131 set_mal_dcrn(mal, MAL_RXCASR,
132 get_mal_dcrn(mal, MAL_RXCASR) | MAL_CHAN_MASK(channel));
133 local_bh_enable();
134}
122 135
123 /* Clear the error status register */ 136void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel)
124 set_mal_dcrn(mal, DCRN_MALESR, mal_error); 137{
138 set_mal_dcrn(mal, MAL_RXCARR, MAL_CHAN_MASK(channel));
139 MAL_DBG("%d: disable_rx(%d)" NL, mal->def->index, channel);
140}
125 141
126 return IRQ_HANDLED; 142void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac)
143{
144 local_bh_disable();
145 MAL_DBG("%d: poll_add(%p)" NL, mal->def->index, commac);
146 list_add_tail(&commac->poll_list, &mal->poll_list);
147 local_bh_enable();
127} 148}
128 149
129static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs) 150void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac)
151{
152 local_bh_disable();
153 MAL_DBG("%d: poll_del(%p)" NL, mal->def->index, commac);
154 list_del(&commac->poll_list);
155 local_bh_enable();
156}
157
158/* synchronized by mal_poll() */
159static inline void mal_enable_eob_irq(struct ibm_ocp_mal *mal)
160{
161 MAL_DBG2("%d: enable_irq" NL, mal->def->index);
162 set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE);
163}
164
165/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */
166static inline void mal_disable_eob_irq(struct ibm_ocp_mal *mal)
167{
168 set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) & ~MAL_CFG_EOPIE);
169 MAL_DBG2("%d: disable_irq" NL, mal->def->index);
170}
171
172static irqreturn_t mal_serr(int irq, void *dev_instance, struct pt_regs *regs)
130{ 173{
131 struct ibm_ocp_mal *mal = dev_instance; 174 struct ibm_ocp_mal *mal = dev_instance;
132 struct list_head *l; 175 u32 esr = get_mal_dcrn(mal, MAL_ESR);
133 unsigned long isr;
134 176
135 isr = get_mal_dcrn(mal, DCRN_MALTXEOBISR); 177 /* Clear the error status register */
136 set_mal_dcrn(mal, DCRN_MALTXEOBISR, isr); 178 set_mal_dcrn(mal, MAL_ESR, esr);
137 179
138 read_lock(&mal_list_lock); 180 MAL_DBG("%d: SERR %08x" NL, mal->def->index, esr);
139 list_for_each(l, &mal->commac) {
140 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
141 181
142 if (isr & mc->tx_chan_mask) { 182 if (esr & MAL_ESR_EVB) {
143 mc->ops->txeob(mc->dev, isr & mc->tx_chan_mask); 183 if (esr & MAL_ESR_DE) {
184 /* We ignore Descriptor error,
185 * TXDE or RXDE interrupt will be generated anyway.
186 */
187 return IRQ_HANDLED;
144 } 188 }
189
190 if (esr & MAL_ESR_PEIN) {
191 /* PLB error, it's probably buggy hardware or
192 * incorrect physical address in BD (i.e. bug)
193 */
194 if (net_ratelimit())
195 printk(KERN_ERR
196 "mal%d: system error, PLB (ESR = 0x%08x)\n",
197 mal->def->index, esr);
198 return IRQ_HANDLED;
199 }
200
201 /* OPB error, it's probably buggy hardware or incorrect EBC setup */
202 if (net_ratelimit())
203 printk(KERN_ERR
204 "mal%d: system error, OPB (ESR = 0x%08x)\n",
205 mal->def->index, esr);
145 } 206 }
146 read_unlock(&mal_list_lock); 207 return IRQ_HANDLED;
208}
209
210static inline void mal_schedule_poll(struct ibm_ocp_mal *mal)
211{
212 if (likely(netif_rx_schedule_prep(&mal->poll_dev))) {
213 MAL_DBG2("%d: schedule_poll" NL, mal->def->index);
214 mal_disable_eob_irq(mal);
215 __netif_rx_schedule(&mal->poll_dev);
216 } else
217 MAL_DBG2("%d: already in poll" NL, mal->def->index);
218}
147 219
220static irqreturn_t mal_txeob(int irq, void *dev_instance, struct pt_regs *regs)
221{
222 struct ibm_ocp_mal *mal = dev_instance;
223 u32 r = get_mal_dcrn(mal, MAL_TXEOBISR);
224 MAL_DBG2("%d: txeob %08x" NL, mal->def->index, r);
225 mal_schedule_poll(mal);
226 set_mal_dcrn(mal, MAL_TXEOBISR, r);
148 return IRQ_HANDLED; 227 return IRQ_HANDLED;
149} 228}
150 229
151static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs) 230static irqreturn_t mal_rxeob(int irq, void *dev_instance, struct pt_regs *regs)
152{ 231{
153 struct ibm_ocp_mal *mal = dev_instance; 232 struct ibm_ocp_mal *mal = dev_instance;
154 struct list_head *l; 233 u32 r = get_mal_dcrn(mal, MAL_RXEOBISR);
155 unsigned long isr; 234 MAL_DBG2("%d: rxeob %08x" NL, mal->def->index, r);
235 mal_schedule_poll(mal);
236 set_mal_dcrn(mal, MAL_RXEOBISR, r);
237 return IRQ_HANDLED;
238}
156 239
157 isr = get_mal_dcrn(mal, DCRN_MALRXEOBISR); 240static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs)
158 set_mal_dcrn(mal, DCRN_MALRXEOBISR, isr); 241{
242 struct ibm_ocp_mal *mal = dev_instance;
243 u32 deir = get_mal_dcrn(mal, MAL_TXDEIR);
244 set_mal_dcrn(mal, MAL_TXDEIR, deir);
159 245
160 read_lock(&mal_list_lock); 246 MAL_DBG("%d: txde %08x" NL, mal->def->index, deir);
161 list_for_each(l, &mal->commac) {
162 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
163 247
164 if (isr & mc->rx_chan_mask) { 248 if (net_ratelimit())
165 mc->ops->rxeob(mc->dev, isr & mc->rx_chan_mask); 249 printk(KERN_ERR
166 } 250 "mal%d: TX descriptor error (TXDEIR = 0x%08x)\n",
167 } 251 mal->def->index, deir);
168 read_unlock(&mal_list_lock);
169 252
170 return IRQ_HANDLED; 253 return IRQ_HANDLED;
171} 254}
172 255
173static irqreturn_t mal_txde(int irq, void *dev_instance, struct pt_regs *regs) 256static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
174{ 257{
175 struct ibm_ocp_mal *mal = dev_instance; 258 struct ibm_ocp_mal *mal = dev_instance;
176 struct list_head *l; 259 struct list_head *l;
177 unsigned long deir; 260 u32 deir = get_mal_dcrn(mal, MAL_RXDEIR);
178 261
179 deir = get_mal_dcrn(mal, DCRN_MALTXDEIR); 262 MAL_DBG("%d: rxde %08x" NL, mal->def->index, deir);
180 263
181 /* FIXME: print which MAL correctly */ 264 list_for_each(l, &mal->list) {
182 printk(KERN_WARNING "%s: Tx descriptor error (MALTXDEIR=%lx)\n",
183 "MAL", deir);
184
185 read_lock(&mal_list_lock);
186 list_for_each(l, &mal->commac) {
187 struct mal_commac *mc = list_entry(l, struct mal_commac, list); 265 struct mal_commac *mc = list_entry(l, struct mal_commac, list);
188 266 if (deir & mc->rx_chan_mask) {
189 if (deir & mc->tx_chan_mask) { 267 mc->rx_stopped = 1;
190 mc->ops->txde(mc->dev, deir & mc->tx_chan_mask); 268 mc->ops->rxde(mc->dev);
191 } 269 }
192 } 270 }
193 read_unlock(&mal_list_lock); 271
272 mal_schedule_poll(mal);
273 set_mal_dcrn(mal, MAL_RXDEIR, deir);
194 274
195 return IRQ_HANDLED; 275 return IRQ_HANDLED;
196} 276}
197 277
198/* 278static int mal_poll(struct net_device *ndev, int *budget)
199 * This interrupt should be very rare at best. This occurs when
200 * the hardware has a problem with the receive descriptors. The manual
201 * states that it occurs when the hardware cannot the receive descriptor
202 * empty bit is not set. The recovery mechanism will be to
203 * traverse through the descriptors, handle any that are marked to be
204 * handled and reinitialize each along the way. At that point the driver
205 * will be restarted.
206 */
207static irqreturn_t mal_rxde(int irq, void *dev_instance, struct pt_regs *regs)
208{ 279{
209 struct ibm_ocp_mal *mal = dev_instance; 280 struct ibm_ocp_mal *mal = ndev->priv;
210 struct list_head *l; 281 struct list_head *l;
211 unsigned long deir; 282 int rx_work_limit = min(ndev->quota, *budget), received = 0, done;
212 283
213 deir = get_mal_dcrn(mal, DCRN_MALRXDEIR); 284 MAL_DBG2("%d: poll(%d) %d ->" NL, mal->def->index, *budget,
285 rx_work_limit);
286 again:
287 /* Process TX skbs */
288 list_for_each(l, &mal->poll_list) {
289 struct mal_commac *mc =
290 list_entry(l, struct mal_commac, poll_list);
291 mc->ops->poll_tx(mc->dev);
292 }
214 293
215 /* 294 /* Process RX skbs.
216 * This really is needed. This case encountered in stress testing. 295 * We _might_ need something more smart here to enforce polling fairness.
217 */ 296 */
218 if (deir == 0) 297 list_for_each(l, &mal->poll_list) {
219 return IRQ_HANDLED; 298 struct mal_commac *mc =
220 299 list_entry(l, struct mal_commac, poll_list);
221 /* FIXME: print which MAL correctly */ 300 int n = mc->ops->poll_rx(mc->dev, rx_work_limit);
222 printk(KERN_WARNING "%s: Rx descriptor error (MALRXDEIR=%lx)\n", 301 if (n) {
223 "MAL", deir); 302 received += n;
224 303 rx_work_limit -= n;
225 read_lock(&mal_list_lock); 304 if (rx_work_limit <= 0) {
226 list_for_each(l, &mal->commac) { 305 done = 0;
227 struct mal_commac *mc = list_entry(l, struct mal_commac, list); 306 goto more_work; // XXX What if this is the last one ?
307 }
308 }
309 }
228 310
229 if (deir & mc->rx_chan_mask) { 311 /* We need to disable IRQs to protect from RXDE IRQ here */
230 mc->ops->rxde(mc->dev, deir & mc->rx_chan_mask); 312 local_irq_disable();
313 __netif_rx_complete(ndev);
314 mal_enable_eob_irq(mal);
315 local_irq_enable();
316
317 done = 1;
318
319 /* Check for "rotting" packet(s) */
320 list_for_each(l, &mal->poll_list) {
321 struct mal_commac *mc =
322 list_entry(l, struct mal_commac, poll_list);
323 if (unlikely(mc->ops->peek_rx(mc->dev) || mc->rx_stopped)) {
324 MAL_DBG2("%d: rotting packet" NL, mal->def->index);
325 if (netif_rx_reschedule(ndev, received))
326 mal_disable_eob_irq(mal);
327 else
328 MAL_DBG2("%d: already in poll list" NL,
329 mal->def->index);
330
331 if (rx_work_limit > 0)
332 goto again;
333 else
334 goto more_work;
231 } 335 }
336 mc->ops->poll_tx(mc->dev);
232 } 337 }
233 read_unlock(&mal_list_lock);
234 338
235 return IRQ_HANDLED; 339 more_work:
340 ndev->quota -= received;
341 *budget -= received;
342
343 MAL_DBG2("%d: poll() %d <- %d" NL, mal->def->index, *budget,
344 done ? 0 : 1);
345 return done ? 0 : 1;
346}
347
348static void mal_reset(struct ibm_ocp_mal *mal)
349{
350 int n = 10;
351 MAL_DBG("%d: reset" NL, mal->def->index);
352
353 set_mal_dcrn(mal, MAL_CFG, MAL_CFG_SR);
354
355 /* Wait for reset to complete (1 system clock) */
356 while ((get_mal_dcrn(mal, MAL_CFG) & MAL_CFG_SR) && n)
357 --n;
358
359 if (unlikely(!n))
360 printk(KERN_ERR "mal%d: reset timeout\n", mal->def->index);
361}
362
363int mal_get_regs_len(struct ibm_ocp_mal *mal)
364{
365 return sizeof(struct emac_ethtool_regs_subhdr) +
366 sizeof(struct ibm_mal_regs);
367}
368
369void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf)
370{
371 struct emac_ethtool_regs_subhdr *hdr = buf;
372 struct ibm_mal_regs *regs = (struct ibm_mal_regs *)(hdr + 1);
373 struct ocp_func_mal_data *maldata = mal->def->additions;
374 int i;
375
376 hdr->version = MAL_VERSION;
377 hdr->index = mal->def->index;
378
379 regs->tx_count = maldata->num_tx_chans;
380 regs->rx_count = maldata->num_rx_chans;
381
382 regs->cfg = get_mal_dcrn(mal, MAL_CFG);
383 regs->esr = get_mal_dcrn(mal, MAL_ESR);
384 regs->ier = get_mal_dcrn(mal, MAL_IER);
385 regs->tx_casr = get_mal_dcrn(mal, MAL_TXCASR);
386 regs->tx_carr = get_mal_dcrn(mal, MAL_TXCARR);
387 regs->tx_eobisr = get_mal_dcrn(mal, MAL_TXEOBISR);
388 regs->tx_deir = get_mal_dcrn(mal, MAL_TXDEIR);
389 regs->rx_casr = get_mal_dcrn(mal, MAL_RXCASR);
390 regs->rx_carr = get_mal_dcrn(mal, MAL_RXCARR);
391 regs->rx_eobisr = get_mal_dcrn(mal, MAL_RXEOBISR);
392 regs->rx_deir = get_mal_dcrn(mal, MAL_RXDEIR);
393
394 for (i = 0; i < regs->tx_count; ++i)
395 regs->tx_ctpr[i] = get_mal_dcrn(mal, MAL_TXCTPR(i));
396
397 for (i = 0; i < regs->rx_count; ++i) {
398 regs->rx_ctpr[i] = get_mal_dcrn(mal, MAL_RXCTPR(i));
399 regs->rcbs[i] = get_mal_dcrn(mal, MAL_RCBS(i));
400 }
401 return regs + 1;
236} 402}
237 403
238static int __init mal_probe(struct ocp_device *ocpdev) 404static int __init mal_probe(struct ocp_device *ocpdev)
239{ 405{
240 struct ibm_ocp_mal *mal = NULL; 406 struct ibm_ocp_mal *mal;
241 struct ocp_func_mal_data *maldata; 407 struct ocp_func_mal_data *maldata;
242 int err = 0; 408 int err = 0, i, bd_size;
409
410 MAL_DBG("%d: probe" NL, ocpdev->def->index);
243 411
244 maldata = (struct ocp_func_mal_data *)ocpdev->def->additions; 412 maldata = ocpdev->def->additions;
245 if (maldata == NULL) { 413 if (maldata == NULL) {
246 printk(KERN_ERR "mal%d: Missing additional datas !\n", 414 printk(KERN_ERR "mal%d: missing additional data!\n",
247 ocpdev->def->index); 415 ocpdev->def->index);
248 return -ENODEV; 416 return -ENODEV;
249 } 417 }
250 418
251 mal = kmalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL); 419 mal = kzalloc(sizeof(struct ibm_ocp_mal), GFP_KERNEL);
252 if (mal == NULL) { 420 if (!mal) {
253 printk(KERN_ERR 421 printk(KERN_ERR
254 "mal%d: Out of memory allocating MAL structure !\n", 422 "mal%d: out of memory allocating MAL structure!\n",
255 ocpdev->def->index); 423 ocpdev->def->index);
256 return -ENOMEM; 424 return -ENOMEM;
257 } 425 }
258 memset(mal, 0, sizeof(*mal)); 426 mal->dcrbase = maldata->dcr_base;
259 427 mal->def = ocpdev->def;
260 switch (ocpdev->def->index) {
261 case 0:
262 mal->dcrbase = DCRN_MAL_BASE;
263 break;
264#ifdef DCRN_MAL1_BASE
265 case 1:
266 mal->dcrbase = DCRN_MAL1_BASE;
267 break;
268#endif
269 default:
270 BUG();
271 }
272
273 /**************************/
274 428
275 INIT_LIST_HEAD(&mal->commac); 429 INIT_LIST_HEAD(&mal->poll_list);
430 set_bit(__LINK_STATE_START, &mal->poll_dev.state);
431 mal->poll_dev.weight = CONFIG_IBM_EMAC_POLL_WEIGHT;
432 mal->poll_dev.poll = mal_poll;
433 mal->poll_dev.priv = mal;
434 atomic_set(&mal->poll_dev.refcnt, 1);
276 435
277 set_mal_dcrn(mal, DCRN_MALRXCARR, 0xFFFFFFFF); 436 INIT_LIST_HEAD(&mal->list);
278 set_mal_dcrn(mal, DCRN_MALTXCARR, 0xFFFFFFFF);
279 437
280 set_mal_dcrn(mal, DCRN_MALCR, MALCR_MMSR); /* 384 */ 438 /* Load power-on reset defaults */
281 /* FIXME: Add delay */ 439 mal_reset(mal);
282 440
283 /* Set the MAL configuration register */ 441 /* Set the MAL configuration register */
284 set_mal_dcrn(mal, DCRN_MALCR, 442 set_mal_dcrn(mal, MAL_CFG, MAL_CFG_DEFAULT | MAL_CFG_PLBB |
285 MALCR_PLBB | MALCR_OPBBL | MALCR_LEA | 443 MAL_CFG_OPBBL | MAL_CFG_LEA);
286 MALCR_PLBLT_DEFAULT); 444
287 445 mal_enable_eob_irq(mal);
288 /* It would be nice to allocate buffers separately for each 446
289 * channel, but we can't because the channels share the upper 447 /* Allocate space for BD rings */
290 * 13 bits of address lines. Each channels buffer must also 448 BUG_ON(maldata->num_tx_chans <= 0 || maldata->num_tx_chans > 32);
291 * be 4k aligned, so we allocate 4k for each channel. This is 449 BUG_ON(maldata->num_rx_chans <= 0 || maldata->num_rx_chans > 32);
292 * inefficient FIXME: do better, if possible */ 450 bd_size = sizeof(struct mal_descriptor) *
293 mal->tx_virt_addr = dma_alloc_coherent(&ocpdev->dev, 451 (NUM_TX_BUFF * maldata->num_tx_chans +
294 MAL_DT_ALIGN * 452 NUM_RX_BUFF * maldata->num_rx_chans);
295 maldata->num_tx_chans, 453 mal->bd_virt =
296 &mal->tx_phys_addr, GFP_KERNEL); 454 dma_alloc_coherent(&ocpdev->dev, bd_size, &mal->bd_dma, GFP_KERNEL);
297 if (mal->tx_virt_addr == NULL) { 455
456 if (!mal->bd_virt) {
298 printk(KERN_ERR 457 printk(KERN_ERR
299 "mal%d: Out of memory allocating MAL descriptors !\n", 458 "mal%d: out of memory allocating RX/TX descriptors!\n",
300 ocpdev->def->index); 459 mal->def->index);
301 err = -ENOMEM; 460 err = -ENOMEM;
302 goto fail; 461 goto fail;
303 } 462 }
463 memset(mal->bd_virt, 0, bd_size);
304 464
305 /* God, oh, god, I hate DCRs */ 465 for (i = 0; i < maldata->num_tx_chans; ++i)
306 set_mal_dcrn(mal, DCRN_MALTXCTP0R, mal->tx_phys_addr); 466 set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
307#ifdef DCRN_MALTXCTP1R 467 sizeof(struct mal_descriptor) *
308 if (maldata->num_tx_chans > 1) 468 mal_tx_bd_offset(mal, i));
309 set_mal_dcrn(mal, DCRN_MALTXCTP1R, 469
310 mal->tx_phys_addr + MAL_DT_ALIGN); 470 for (i = 0; i < maldata->num_rx_chans; ++i)
311#endif /* DCRN_MALTXCTP1R */ 471 set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
312#ifdef DCRN_MALTXCTP2R 472 sizeof(struct mal_descriptor) *
313 if (maldata->num_tx_chans > 2) 473 mal_rx_bd_offset(mal, i));
314 set_mal_dcrn(mal, DCRN_MALTXCTP2R,
315 mal->tx_phys_addr + 2 * MAL_DT_ALIGN);
316#endif /* DCRN_MALTXCTP2R */
317#ifdef DCRN_MALTXCTP3R
318 if (maldata->num_tx_chans > 3)
319 set_mal_dcrn(mal, DCRN_MALTXCTP3R,
320 mal->tx_phys_addr + 3 * MAL_DT_ALIGN);
321#endif /* DCRN_MALTXCTP3R */
322#ifdef DCRN_MALTXCTP4R
323 if (maldata->num_tx_chans > 4)
324 set_mal_dcrn(mal, DCRN_MALTXCTP4R,
325 mal->tx_phys_addr + 4 * MAL_DT_ALIGN);
326#endif /* DCRN_MALTXCTP4R */
327#ifdef DCRN_MALTXCTP5R
328 if (maldata->num_tx_chans > 5)
329 set_mal_dcrn(mal, DCRN_MALTXCTP5R,
330 mal->tx_phys_addr + 5 * MAL_DT_ALIGN);
331#endif /* DCRN_MALTXCTP5R */
332#ifdef DCRN_MALTXCTP6R
333 if (maldata->num_tx_chans > 6)
334 set_mal_dcrn(mal, DCRN_MALTXCTP6R,
335 mal->tx_phys_addr + 6 * MAL_DT_ALIGN);
336#endif /* DCRN_MALTXCTP6R */
337#ifdef DCRN_MALTXCTP7R
338 if (maldata->num_tx_chans > 7)
339 set_mal_dcrn(mal, DCRN_MALTXCTP7R,
340 mal->tx_phys_addr + 7 * MAL_DT_ALIGN);
341#endif /* DCRN_MALTXCTP7R */
342
343 mal->rx_virt_addr = dma_alloc_coherent(&ocpdev->dev,
344 MAL_DT_ALIGN *
345 maldata->num_rx_chans,
346 &mal->rx_phys_addr, GFP_KERNEL);
347
348 set_mal_dcrn(mal, DCRN_MALRXCTP0R, mal->rx_phys_addr);
349#ifdef DCRN_MALRXCTP1R
350 if (maldata->num_rx_chans > 1)
351 set_mal_dcrn(mal, DCRN_MALRXCTP1R,
352 mal->rx_phys_addr + MAL_DT_ALIGN);
353#endif /* DCRN_MALRXCTP1R */
354#ifdef DCRN_MALRXCTP2R
355 if (maldata->num_rx_chans > 2)
356 set_mal_dcrn(mal, DCRN_MALRXCTP2R,
357 mal->rx_phys_addr + 2 * MAL_DT_ALIGN);
358#endif /* DCRN_MALRXCTP2R */
359#ifdef DCRN_MALRXCTP3R
360 if (maldata->num_rx_chans > 3)
361 set_mal_dcrn(mal, DCRN_MALRXCTP3R,
362 mal->rx_phys_addr + 3 * MAL_DT_ALIGN);
363#endif /* DCRN_MALRXCTP3R */
364 474
365 err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal); 475 err = request_irq(maldata->serr_irq, mal_serr, 0, "MAL SERR", mal);
366 if (err) 476 if (err)
367 goto fail; 477 goto fail2;
368 err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE ", mal); 478 err = request_irq(maldata->txde_irq, mal_txde, 0, "MAL TX DE", mal);
369 if (err) 479 if (err)
370 goto fail; 480 goto fail3;
371 err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal); 481 err = request_irq(maldata->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
372 if (err) 482 if (err)
373 goto fail; 483 goto fail4;
374 err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal); 484 err = request_irq(maldata->rxde_irq, mal_rxde, 0, "MAL RX DE", mal);
375 if (err) 485 if (err)
376 goto fail; 486 goto fail5;
377 err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal); 487 err = request_irq(maldata->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
378 if (err) 488 if (err)
379 goto fail; 489 goto fail6;
380 490
381 set_mal_dcrn(mal, DCRN_MALIER, 491 /* Enable all MAL SERR interrupt sources */
382 MALIER_DE | MALIER_NE | MALIER_TE | 492 set_mal_dcrn(mal, MAL_IER, MAL_IER_EVENTS);
383 MALIER_OPBE | MALIER_PLBE);
384 493
385 /* Advertise me to the rest of the world */ 494 /* Advertise this instance to the rest of the world */
386 ocp_set_drvdata(ocpdev, mal); 495 ocp_set_drvdata(ocpdev, mal);
387 496
388 printk(KERN_INFO "mal%d: Initialized, %d tx channels, %d rx channels\n", 497 mal_dbg_register(mal->def->index, mal);
389 ocpdev->def->index, maldata->num_tx_chans,
390 maldata->num_rx_chans);
391 498
499 printk(KERN_INFO "mal%d: initialized, %d TX channels, %d RX channels\n",
500 mal->def->index, maldata->num_tx_chans, maldata->num_rx_chans);
392 return 0; 501 return 0;
393 502
503 fail6:
504 free_irq(maldata->rxde_irq, mal);
505 fail5:
506 free_irq(maldata->txeob_irq, mal);
507 fail4:
508 free_irq(maldata->txde_irq, mal);
509 fail3:
510 free_irq(maldata->serr_irq, mal);
511 fail2:
512 dma_free_coherent(&ocpdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
394 fail: 513 fail:
395 /* FIXME: dispose requested IRQs ! */ 514 kfree(mal);
396 if (err && mal)
397 kfree(mal);
398 return err; 515 return err;
399} 516}
400 517
401static void __exit mal_remove(struct ocp_device *ocpdev) 518static void __exit mal_remove(struct ocp_device *ocpdev)
402{ 519{
403 struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev); 520 struct ibm_ocp_mal *mal = ocp_get_drvdata(ocpdev);
404 struct ocp_func_mal_data *maldata = ocpdev->def->additions; 521 struct ocp_func_mal_data *maldata = mal->def->additions;
522
523 MAL_DBG("%d: remove" NL, mal->def->index);
405 524
406 BUG_ON(!maldata); 525 /* Syncronize with scheduled polling,
526 stolen from net/core/dev.c:dev_close()
527 */
528 clear_bit(__LINK_STATE_START, &mal->poll_dev.state);
529 netif_poll_disable(&mal->poll_dev);
530
531 if (!list_empty(&mal->list)) {
532 /* This is *very* bad */
533 printk(KERN_EMERG
534 "mal%d: commac list is not empty on remove!\n",
535 mal->def->index);
536 }
407 537
408 ocp_set_drvdata(ocpdev, NULL); 538 ocp_set_drvdata(ocpdev, NULL);
409 539
410 /* FIXME: shut down the MAL, deal with dependency with emac */
411 free_irq(maldata->serr_irq, mal); 540 free_irq(maldata->serr_irq, mal);
412 free_irq(maldata->txde_irq, mal); 541 free_irq(maldata->txde_irq, mal);
413 free_irq(maldata->txeob_irq, mal); 542 free_irq(maldata->txeob_irq, mal);
414 free_irq(maldata->rxde_irq, mal); 543 free_irq(maldata->rxde_irq, mal);
415 free_irq(maldata->rxeob_irq, mal); 544 free_irq(maldata->rxeob_irq, mal);
416 545
417 if (mal->tx_virt_addr) 546 mal_reset(mal);
418 dma_free_coherent(&ocpdev->dev,
419 MAL_DT_ALIGN * maldata->num_tx_chans,
420 mal->tx_virt_addr, mal->tx_phys_addr);
421 547
422 if (mal->rx_virt_addr) 548 mal_dbg_register(mal->def->index, NULL);
423 dma_free_coherent(&ocpdev->dev, 549
424 MAL_DT_ALIGN * maldata->num_rx_chans, 550 dma_free_coherent(&ocpdev->dev,
425 mal->rx_virt_addr, mal->rx_phys_addr); 551 sizeof(struct mal_descriptor) *
552 (NUM_TX_BUFF * maldata->num_tx_chans +
553 NUM_RX_BUFF * maldata->num_rx_chans), mal->bd_virt,
554 mal->bd_dma);
426 555
427 kfree(mal); 556 kfree(mal);
428} 557}
429 558
430/* Structure for a device driver */ 559/* Structure for a device driver */
431static struct ocp_device_id mal_ids[] = { 560static struct ocp_device_id mal_ids[] = {
432 {.vendor = OCP_ANY_ID,.function = OCP_FUNC_MAL}, 561 { .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_MAL },
433 {.vendor = OCP_VENDOR_INVALID} 562 { .vendor = OCP_VENDOR_INVALID}
434}; 563};
435 564
436static struct ocp_driver mal_driver = { 565static struct ocp_driver mal_driver = {
@@ -441,23 +570,14 @@ static struct ocp_driver mal_driver = {
441 .remove = mal_remove, 570 .remove = mal_remove,
442}; 571};
443 572
444static int __init init_mals(void) 573int __init mal_init(void)
445{ 574{
446 int rc; 575 MAL_DBG(": init" NL);
447 576 return ocp_register_driver(&mal_driver);
448 rc = ocp_register_driver(&mal_driver);
449 if (rc < 0) {
450 ocp_unregister_driver(&mal_driver);
451 return -ENODEV;
452 }
453
454 return 0;
455} 577}
456 578
457static void __exit exit_mals(void) 579void __exit mal_exit(void)
458{ 580{
581 MAL_DBG(": exit" NL);
459 ocp_unregister_driver(&mal_driver); 582 ocp_unregister_driver(&mal_driver);
460} 583}
461
462module_init(init_mals);
463module_exit(exit_mals);
diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h
index dd9f0dabc6e0..15b0bdae26ac 100644
--- a/drivers/net/ibm_emac/ibm_emac_mal.h
+++ b/drivers/net/ibm_emac/ibm_emac_mal.h
@@ -1,131 +1,267 @@
1#ifndef _IBM_EMAC_MAL_H 1/*
2#define _IBM_EMAC_MAL_H 2 * drivers/net/ibm_emac/ibm_emac_mal.h
3 *
4 * Memory Access Layer (MAL) support
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
11 * Copyright 2002 MontaVista Softare Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#ifndef __IBM_EMAC_MAL_H_
20#define __IBM_EMAC_MAL_H_
3 21
22#include <linux/config.h>
23#include <linux/init.h>
4#include <linux/list.h> 24#include <linux/list.h>
25#include <linux/netdevice.h>
5 26
6#define MAL_DT_ALIGN (4096) /* Alignment for each channel's descriptor table */ 27#include <asm/io.h>
7 28
8#define MAL_CHAN_MASK(chan) (0x80000000 >> (chan)) 29/*
30 * These MAL "versions" probably aren't the real versions IBM uses for these
31 * MAL cores, I assigned them just to make #ifdefs in this file nicer and
32 * reflect the fact that 40x and 44x have slightly different MALs. --ebs
33 */
34#if defined(CONFIG_405GP) || defined(CONFIG_405GPR) || defined(CONFIG_405EP) || \
35 defined(CONFIG_440EP) || defined(CONFIG_NP405H)
36#define MAL_VERSION 1
37#elif defined(CONFIG_440GP) || defined(CONFIG_440GX) || defined(CONFIG_440SP)
38#define MAL_VERSION 2
39#else
40#error "Unknown SoC, please check chip manual and choose MAL 'version'"
41#endif
42
43/* MALx DCR registers */
44#define MAL_CFG 0x00
45#define MAL_CFG_SR 0x80000000
46#define MAL_CFG_PLBB 0x00004000
47#define MAL_CFG_OPBBL 0x00000080
48#define MAL_CFG_EOPIE 0x00000004
49#define MAL_CFG_LEA 0x00000002
50#define MAL_CFG_SD 0x00000001
51#if MAL_VERSION == 1
52#define MAL_CFG_PLBP_MASK 0x00c00000
53#define MAL_CFG_PLBP_10 0x00800000
54#define MAL_CFG_GA 0x00200000
55#define MAL_CFG_OA 0x00100000
56#define MAL_CFG_PLBLE 0x00080000
57#define MAL_CFG_PLBT_MASK 0x00078000
58#define MAL_CFG_DEFAULT (MAL_CFG_PLBP_10 | MAL_CFG_PLBT_MASK)
59#elif MAL_VERSION == 2
60#define MAL_CFG_RPP_MASK 0x00c00000
61#define MAL_CFG_RPP_10 0x00800000
62#define MAL_CFG_RMBS_MASK 0x00300000
63#define MAL_CFG_WPP_MASK 0x000c0000
64#define MAL_CFG_WPP_10 0x00080000
65#define MAL_CFG_WMBS_MASK 0x00030000
66#define MAL_CFG_PLBLE 0x00008000
67#define MAL_CFG_DEFAULT (MAL_CFG_RMBS_MASK | MAL_CFG_WMBS_MASK | \
68 MAL_CFG_RPP_10 | MAL_CFG_WPP_10)
69#else
70#error "Unknown MAL version"
71#endif
72
73#define MAL_ESR 0x01
74#define MAL_ESR_EVB 0x80000000
75#define MAL_ESR_CIDT 0x40000000
76#define MAL_ESR_CID_MASK 0x3e000000
77#define MAL_ESR_CID_SHIFT 25
78#define MAL_ESR_DE 0x00100000
79#define MAL_ESR_OTE 0x00040000
80#define MAL_ESR_OSE 0x00020000
81#define MAL_ESR_PEIN 0x00010000
82#define MAL_ESR_DEI 0x00000010
83#define MAL_ESR_OTEI 0x00000004
84#define MAL_ESR_OSEI 0x00000002
85#define MAL_ESR_PBEI 0x00000001
86#if MAL_VERSION == 1
87#define MAL_ESR_ONE 0x00080000
88#define MAL_ESR_ONEI 0x00000008
89#elif MAL_VERSION == 2
90#define MAL_ESR_PTE 0x00800000
91#define MAL_ESR_PRE 0x00400000
92#define MAL_ESR_PWE 0x00200000
93#define MAL_ESR_PTEI 0x00000080
94#define MAL_ESR_PREI 0x00000040
95#define MAL_ESR_PWEI 0x00000020
96#else
97#error "Unknown MAL version"
98#endif
99
100#define MAL_IER 0x02
101#define MAL_IER_DE 0x00000010
102#define MAL_IER_OTE 0x00000004
103#define MAL_IER_OE 0x00000002
104#define MAL_IER_PE 0x00000001
105#if MAL_VERSION == 1
106#define MAL_IER_NWE 0x00000008
107#define MAL_IER_SOC_EVENTS MAL_IER_NWE
108#elif MAL_VERSION == 2
109#define MAL_IER_PT 0x00000080
110#define MAL_IER_PRE 0x00000040
111#define MAL_IER_PWE 0x00000020
112#define MAL_IER_SOC_EVENTS (MAL_IER_PT | MAL_IER_PRE | MAL_IER_PWE)
113#else
114#error "Unknown MAL version"
115#endif
116#define MAL_IER_EVENTS (MAL_IER_SOC_EVENTS | MAL_IER_OTE | \
117 MAL_IER_OTE | MAL_IER_OE | MAL_IER_PE)
118
119#define MAL_TXCASR 0x04
120#define MAL_TXCARR 0x05
121#define MAL_TXEOBISR 0x06
122#define MAL_TXDEIR 0x07
123#define MAL_RXCASR 0x10
124#define MAL_RXCARR 0x11
125#define MAL_RXEOBISR 0x12
126#define MAL_RXDEIR 0x13
127#define MAL_TXCTPR(n) ((n) + 0x20)
128#define MAL_RXCTPR(n) ((n) + 0x40)
129#define MAL_RCBS(n) ((n) + 0x60)
130
131/* In reality MAL can handle TX buffers up to 4095 bytes long,
132 * but this isn't a good round number :) --ebs
133 */
134#define MAL_MAX_TX_SIZE 4080
135#define MAL_MAX_RX_SIZE 4080
136
137static inline int mal_rx_size(int len)
138{
139 len = (len + 0xf) & ~0xf;
140 return len > MAL_MAX_RX_SIZE ? MAL_MAX_RX_SIZE : len;
141}
142
143static inline int mal_tx_chunks(int len)
144{
145 return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE;
146}
147
148#define MAL_CHAN_MASK(n) (0x80000000 >> (n))
9 149
10/* MAL Buffer Descriptor structure */ 150/* MAL Buffer Descriptor structure */
11struct mal_descriptor { 151struct mal_descriptor {
12 unsigned short ctrl; /* MAL / Commac status control bits */ 152 u16 ctrl; /* MAL / Commac status control bits */
13 short data_len; /* Max length is 4K-1 (12 bits) */ 153 u16 data_len; /* Max length is 4K-1 (12 bits) */
14 unsigned char *data_ptr; /* pointer to actual data buffer */ 154 u32 data_ptr; /* pointer to actual data buffer */
15} __attribute__ ((packed)); 155};
16 156
17/* the following defines are for the MadMAL status and control registers. */ 157/* the following defines are for the MadMAL status and control registers. */
18/* MADMAL transmit and receive status/control bits */ 158/* MADMAL transmit and receive status/control bits */
19#define MAL_RX_CTRL_EMPTY 0x8000 159#define MAL_RX_CTRL_EMPTY 0x8000
20#define MAL_RX_CTRL_WRAP 0x4000 160#define MAL_RX_CTRL_WRAP 0x4000
21#define MAL_RX_CTRL_CM 0x2000 161#define MAL_RX_CTRL_CM 0x2000
22#define MAL_RX_CTRL_LAST 0x1000 162#define MAL_RX_CTRL_LAST 0x1000
23#define MAL_RX_CTRL_FIRST 0x0800 163#define MAL_RX_CTRL_FIRST 0x0800
24#define MAL_RX_CTRL_INTR 0x0400 164#define MAL_RX_CTRL_INTR 0x0400
25 165#define MAL_RX_CTRL_SINGLE (MAL_RX_CTRL_LAST | MAL_RX_CTRL_FIRST)
26#define MAL_TX_CTRL_READY 0x8000 166#define MAL_IS_SINGLE_RX(ctrl) (((ctrl) & MAL_RX_CTRL_SINGLE) == MAL_RX_CTRL_SINGLE)
27#define MAL_TX_CTRL_WRAP 0x4000 167
28#define MAL_TX_CTRL_CM 0x2000 168#define MAL_TX_CTRL_READY 0x8000
29#define MAL_TX_CTRL_LAST 0x1000 169#define MAL_TX_CTRL_WRAP 0x4000
30#define MAL_TX_CTRL_INTR 0x0400 170#define MAL_TX_CTRL_CM 0x2000
171#define MAL_TX_CTRL_LAST 0x1000
172#define MAL_TX_CTRL_INTR 0x0400
31 173
32struct mal_commac_ops { 174struct mal_commac_ops {
33 void (*txeob) (void *dev, u32 chanmask); 175 void (*poll_tx) (void *dev);
34 void (*txde) (void *dev, u32 chanmask); 176 int (*poll_rx) (void *dev, int budget);
35 void (*rxeob) (void *dev, u32 chanmask); 177 int (*peek_rx) (void *dev);
36 void (*rxde) (void *dev, u32 chanmask); 178 void (*rxde) (void *dev);
37}; 179};
38 180
39struct mal_commac { 181struct mal_commac {
40 struct mal_commac_ops *ops; 182 struct mal_commac_ops *ops;
41 void *dev; 183 void *dev;
42 u32 tx_chan_mask, rx_chan_mask; 184 struct list_head poll_list;
43 struct list_head list; 185 int rx_stopped;
186
187 u32 tx_chan_mask;
188 u32 rx_chan_mask;
189 struct list_head list;
44}; 190};
45 191
46struct ibm_ocp_mal { 192struct ibm_ocp_mal {
47 int dcrbase; 193 int dcrbase;
48 194
49 struct list_head commac; 195 struct list_head poll_list;
50 u32 tx_chan_mask, rx_chan_mask; 196 struct net_device poll_dev;
51 197
52 dma_addr_t tx_phys_addr; 198 struct list_head list;
53 struct mal_descriptor *tx_virt_addr; 199 u32 tx_chan_mask;
200 u32 rx_chan_mask;
54 201
55 dma_addr_t rx_phys_addr; 202 dma_addr_t bd_dma;
56 struct mal_descriptor *rx_virt_addr; 203 struct mal_descriptor *bd_virt;
57};
58 204
59#define GET_MAL_STANZA(base,dcrn) \ 205 struct ocp_def *def;
60 case base: \ 206};
61 x = mfdcr(dcrn(base)); \
62 break;
63
64#define SET_MAL_STANZA(base,dcrn, val) \
65 case base: \
66 mtdcr(dcrn(base), (val)); \
67 break;
68
69#define GET_MAL0_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL_BASE,dcrn)
70#define SET_MAL0_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL_BASE,dcrn,val)
71
72#ifdef DCRN_MAL1_BASE
73#define GET_MAL1_STANZA(dcrn) GET_MAL_STANZA(DCRN_MAL1_BASE,dcrn)
74#define SET_MAL1_STANZA(dcrn,val) SET_MAL_STANZA(DCRN_MAL1_BASE,dcrn,val)
75#else /* ! DCRN_MAL1_BASE */
76#define GET_MAL1_STANZA(dcrn)
77#define SET_MAL1_STANZA(dcrn,val)
78#endif
79 207
80#define get_mal_dcrn(mal, dcrn) ({ \ 208static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg)
81 u32 x; \
82 switch ((mal)->dcrbase) { \
83 GET_MAL0_STANZA(dcrn) \
84 GET_MAL1_STANZA(dcrn) \
85 default: \
86 x = 0; \
87 BUG(); \
88 } \
89x; })
90
91#define set_mal_dcrn(mal, dcrn, val) do { \
92 switch ((mal)->dcrbase) { \
93 SET_MAL0_STANZA(dcrn,val) \
94 SET_MAL1_STANZA(dcrn,val) \
95 default: \
96 BUG(); \
97 } } while (0)
98
99static inline void mal_enable_tx_channels(struct ibm_ocp_mal *mal, u32 chanmask)
100{ 209{
101 set_mal_dcrn(mal, DCRN_MALTXCASR, 210 return mfdcr(mal->dcrbase + reg);
102 get_mal_dcrn(mal, DCRN_MALTXCASR) | chanmask);
103} 211}
104 212
105static inline void mal_disable_tx_channels(struct ibm_ocp_mal *mal, 213static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val)
106 u32 chanmask)
107{ 214{
108 set_mal_dcrn(mal, DCRN_MALTXCARR, chanmask); 215 mtdcr(mal->dcrbase + reg, val);
109} 216}
110 217
111static inline void mal_enable_rx_channels(struct ibm_ocp_mal *mal, u32 chanmask) 218/* Register MAL devices */
112{ 219int mal_init(void) __init;
113 set_mal_dcrn(mal, DCRN_MALRXCASR, 220void mal_exit(void) __exit;
114 get_mal_dcrn(mal, DCRN_MALRXCASR) | chanmask);
115}
116 221
117static inline void mal_disable_rx_channels(struct ibm_ocp_mal *mal, 222int mal_register_commac(struct ibm_ocp_mal *mal,
118 u32 chanmask) 223 struct mal_commac *commac) __init;
119{ 224void mal_unregister_commac(struct ibm_ocp_mal *mal,
120 set_mal_dcrn(mal, DCRN_MALRXCARR, chanmask); 225 struct mal_commac *commac) __exit;
121} 226int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, unsigned long size);
227
228/* Returns BD ring offset for a particular channel
229 (in 'struct mal_descriptor' elements)
230*/
231int mal_tx_bd_offset(struct ibm_ocp_mal *mal, int channel);
232int mal_rx_bd_offset(struct ibm_ocp_mal *mal, int channel);
233
234void mal_enable_tx_channel(struct ibm_ocp_mal *mal, int channel);
235void mal_disable_tx_channel(struct ibm_ocp_mal *mal, int channel);
236void mal_enable_rx_channel(struct ibm_ocp_mal *mal, int channel);
237void mal_disable_rx_channel(struct ibm_ocp_mal *mal, int channel);
122 238
123extern int mal_register_commac(struct ibm_ocp_mal *mal, 239/* Add/remove EMAC to/from MAL polling list */
124 struct mal_commac *commac); 240void mal_poll_add(struct ibm_ocp_mal *mal, struct mal_commac *commac);
125extern int mal_unregister_commac(struct ibm_ocp_mal *mal, 241void mal_poll_del(struct ibm_ocp_mal *mal, struct mal_commac *commac);
126 struct mal_commac *commac); 242
243/* Ethtool MAL registers */
244struct ibm_mal_regs {
245 u32 tx_count;
246 u32 rx_count;
247
248 u32 cfg;
249 u32 esr;
250 u32 ier;
251 u32 tx_casr;
252 u32 tx_carr;
253 u32 tx_eobisr;
254 u32 tx_deir;
255 u32 rx_casr;
256 u32 rx_carr;
257 u32 rx_eobisr;
258 u32 rx_deir;
259 u32 tx_ctpr[32];
260 u32 rx_ctpr[32];
261 u32 rcbs[32];
262};
127 263
128extern int mal_set_rcbs(struct ibm_ocp_mal *mal, int channel, 264int mal_get_regs_len(struct ibm_ocp_mal *mal);
129 unsigned long size); 265void *mal_dump_regs(struct ibm_ocp_mal *mal, void *buf);
130 266
131#endif /* _IBM_EMAC_MAL_H */ 267#endif /* __IBM_EMAC_MAL_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.c b/drivers/net/ibm_emac/ibm_emac_phy.c
index 14213f090e91..a27e49cfe43b 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.c
+++ b/drivers/net/ibm_emac/ibm_emac_phy.c
@@ -1,96 +1,80 @@
1/* 1/*
2 * ibm_ocp_phy.c 2 * drivers/net/ibm_emac/ibm_emac_phy.c
3 * 3 *
4 * PHY drivers for the ibm ocp ethernet driver. Borrowed 4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
5 * from sungem_phy.c, though I only kept the generic MII 5 * Borrowed from sungem_phy.c, though I only kept the generic MII
6 * driver for now. 6 * driver for now.
7 * 7 *
8 * This file should be shared with other drivers or eventually 8 * This file should be shared with other drivers or eventually
9 * merged as the "low level" part of miilib 9 * merged as the "low level" part of miilib
10 * 10 *
11 * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org) 11 * (c) 2003, Benjamin Herrenscmidt (benh@kernel.crashing.org)
12 * (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net>
12 * 13 *
13 */ 14 */
14
15#include <linux/config.h> 15#include <linux/config.h>
16
17#include <linux/module.h> 16#include <linux/module.h>
18
19#include <linux/kernel.h> 17#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/types.h> 18#include <linux/types.h>
22#include <linux/netdevice.h> 19#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/mii.h> 20#include <linux/mii.h>
25#include <linux/ethtool.h> 21#include <linux/ethtool.h>
26#include <linux/delay.h> 22#include <linux/delay.h>
27 23
24#include <asm/ocp.h>
25
28#include "ibm_emac_phy.h" 26#include "ibm_emac_phy.h"
29 27
30static int reset_one_mii_phy(struct mii_phy *phy, int phy_id) 28static inline int phy_read(struct mii_phy *phy, int reg)
29{
30 return phy->mdio_read(phy->dev, phy->address, reg);
31}
32
33static inline void phy_write(struct mii_phy *phy, int reg, int val)
31{ 34{
32 u16 val; 35 phy->mdio_write(phy->dev, phy->address, reg, val);
36}
37
38int mii_reset_phy(struct mii_phy *phy)
39{
40 int val;
33 int limit = 10000; 41 int limit = 10000;
34 42
35 val = __phy_read(phy, phy_id, MII_BMCR); 43 val = phy_read(phy, MII_BMCR);
36 val &= ~BMCR_ISOLATE; 44 val &= ~BMCR_ISOLATE;
37 val |= BMCR_RESET; 45 val |= BMCR_RESET;
38 __phy_write(phy, phy_id, MII_BMCR, val); 46 phy_write(phy, MII_BMCR, val);
39 47
40 udelay(100); 48 udelay(300);
41 49
42 while (limit--) { 50 while (limit--) {
43 val = __phy_read(phy, phy_id, MII_BMCR); 51 val = phy_read(phy, MII_BMCR);
44 if ((val & BMCR_RESET) == 0) 52 if (val >= 0 && (val & BMCR_RESET) == 0)
45 break; 53 break;
46 udelay(10); 54 udelay(10);
47 } 55 }
48 if ((val & BMCR_ISOLATE) && limit > 0) 56 if ((val & BMCR_ISOLATE) && limit > 0)
49 __phy_write(phy, phy_id, MII_BMCR, val & ~BMCR_ISOLATE); 57 phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
50
51 return (limit <= 0);
52}
53
54static int cis8201_init(struct mii_phy *phy)
55{
56 u16 epcr;
57
58 epcr = phy_read(phy, MII_CIS8201_EPCR);
59 epcr &= ~EPCR_MODE_MASK;
60
61 switch (phy->mode) {
62 case PHY_MODE_TBI:
63 epcr |= EPCR_TBI_MODE;
64 break;
65 case PHY_MODE_RTBI:
66 epcr |= EPCR_RTBI_MODE;
67 break;
68 case PHY_MODE_GMII:
69 epcr |= EPCR_GMII_MODE;
70 break;
71 case PHY_MODE_RGMII:
72 default:
73 epcr |= EPCR_RGMII_MODE;
74 }
75 58
76 phy_write(phy, MII_CIS8201_EPCR, epcr); 59 return limit <= 0;
77
78 return 0;
79} 60}
80 61
81static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise) 62static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
82{ 63{
83 u16 ctl, adv; 64 int ctl, adv;
84 65
85 phy->autoneg = 1; 66 phy->autoneg = AUTONEG_ENABLE;
86 phy->speed = SPEED_10; 67 phy->speed = SPEED_10;
87 phy->duplex = DUPLEX_HALF; 68 phy->duplex = DUPLEX_HALF;
88 phy->pause = 0; 69 phy->pause = phy->asym_pause = 0;
89 phy->advertising = advertise; 70 phy->advertising = advertise;
90 71
91 /* Setup standard advertise */ 72 /* Setup standard advertise */
92 adv = phy_read(phy, MII_ADVERTISE); 73 adv = phy_read(phy, MII_ADVERTISE);
93 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4); 74 if (adv < 0)
75 return adv;
76 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
77 ADVERTISE_PAUSE_ASYM);
94 if (advertise & ADVERTISED_10baseT_Half) 78 if (advertise & ADVERTISED_10baseT_Half)
95 adv |= ADVERTISE_10HALF; 79 adv |= ADVERTISE_10HALF;
96 if (advertise & ADVERTISED_10baseT_Full) 80 if (advertise & ADVERTISED_10baseT_Full)
@@ -99,8 +83,25 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
99 adv |= ADVERTISE_100HALF; 83 adv |= ADVERTISE_100HALF;
100 if (advertise & ADVERTISED_100baseT_Full) 84 if (advertise & ADVERTISED_100baseT_Full)
101 adv |= ADVERTISE_100FULL; 85 adv |= ADVERTISE_100FULL;
86 if (advertise & ADVERTISED_Pause)
87 adv |= ADVERTISE_PAUSE_CAP;
88 if (advertise & ADVERTISED_Asym_Pause)
89 adv |= ADVERTISE_PAUSE_ASYM;
102 phy_write(phy, MII_ADVERTISE, adv); 90 phy_write(phy, MII_ADVERTISE, adv);
103 91
92 if (phy->features &
93 (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
94 adv = phy_read(phy, MII_CTRL1000);
95 if (adv < 0)
96 return adv;
97 adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
98 if (advertise & ADVERTISED_1000baseT_Full)
99 adv |= ADVERTISE_1000FULL;
100 if (advertise & ADVERTISED_1000baseT_Half)
101 adv |= ADVERTISE_1000HALF;
102 phy_write(phy, MII_CTRL1000, adv);
103 }
104
104 /* Start/Restart aneg */ 105 /* Start/Restart aneg */
105 ctl = phy_read(phy, MII_BMCR); 106 ctl = phy_read(phy, MII_BMCR);
106 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART); 107 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
@@ -111,14 +112,16 @@ static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
111 112
112static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd) 113static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
113{ 114{
114 u16 ctl; 115 int ctl;
115 116
116 phy->autoneg = 0; 117 phy->autoneg = AUTONEG_DISABLE;
117 phy->speed = speed; 118 phy->speed = speed;
118 phy->duplex = fd; 119 phy->duplex = fd;
119 phy->pause = 0; 120 phy->pause = phy->asym_pause = 0;
120 121
121 ctl = phy_read(phy, MII_BMCR); 122 ctl = phy_read(phy, MII_BMCR);
123 if (ctl < 0)
124 return ctl;
122 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE); 125 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
123 126
124 /* First reset the PHY */ 127 /* First reset the PHY */
@@ -132,6 +135,8 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
132 ctl |= BMCR_SPEED100; 135 ctl |= BMCR_SPEED100;
133 break; 136 break;
134 case SPEED_1000: 137 case SPEED_1000:
138 ctl |= BMCR_SPEED1000;
139 break;
135 default: 140 default:
136 return -EINVAL; 141 return -EINVAL;
137 } 142 }
@@ -144,112 +149,143 @@ static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
144 149
145static int genmii_poll_link(struct mii_phy *phy) 150static int genmii_poll_link(struct mii_phy *phy)
146{ 151{
147 u16 status; 152 int status;
148 153
149 (void)phy_read(phy, MII_BMSR); 154 /* Clear latched value with dummy read */
155 phy_read(phy, MII_BMSR);
150 status = phy_read(phy, MII_BMSR); 156 status = phy_read(phy, MII_BMSR);
151 if ((status & BMSR_LSTATUS) == 0) 157 if (status < 0 || (status & BMSR_LSTATUS) == 0)
152 return 0; 158 return 0;
153 if (phy->autoneg && !(status & BMSR_ANEGCOMPLETE)) 159 if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE))
154 return 0; 160 return 0;
155 return 1; 161 return 1;
156} 162}
157 163
158#define MII_CIS8201_ACSR 0x1c 164static int genmii_read_link(struct mii_phy *phy)
159#define ACSR_DUPLEX_STATUS 0x0020
160#define ACSR_SPEED_1000BASET 0x0010
161#define ACSR_SPEED_100BASET 0x0008
162
163static int cis8201_read_link(struct mii_phy *phy)
164{ 165{
165 u16 acsr; 166 if (phy->autoneg == AUTONEG_ENABLE) {
167 int glpa = 0;
168 int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE);
169 if (lpa < 0)
170 return lpa;
171
172 if (phy->features &
173 (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
174 int adv = phy_read(phy, MII_CTRL1000);
175 glpa = phy_read(phy, MII_STAT1000);
176
177 if (glpa < 0 || adv < 0)
178 return adv;
179
180 glpa &= adv << 2;
181 }
182
183 phy->speed = SPEED_10;
184 phy->duplex = DUPLEX_HALF;
185 phy->pause = phy->asym_pause = 0;
186
187 if (glpa & (LPA_1000FULL | LPA_1000HALF)) {
188 phy->speed = SPEED_1000;
189 if (glpa & LPA_1000FULL)
190 phy->duplex = DUPLEX_FULL;
191 } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
192 phy->speed = SPEED_100;
193 if (lpa & LPA_100FULL)
194 phy->duplex = DUPLEX_FULL;
195 } else if (lpa & LPA_10FULL)
196 phy->duplex = DUPLEX_FULL;
166 197
167 if (phy->autoneg) { 198 if (phy->duplex == DUPLEX_FULL) {
168 acsr = phy_read(phy, MII_CIS8201_ACSR); 199 phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
200 phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
201 }
202 } else {
203 int bmcr = phy_read(phy, MII_BMCR);
204 if (bmcr < 0)
205 return bmcr;
169 206
170 if (acsr & ACSR_DUPLEX_STATUS) 207 if (bmcr & BMCR_FULLDPLX)
171 phy->duplex = DUPLEX_FULL; 208 phy->duplex = DUPLEX_FULL;
172 else 209 else
173 phy->duplex = DUPLEX_HALF; 210 phy->duplex = DUPLEX_HALF;
174 if (acsr & ACSR_SPEED_1000BASET) { 211 if (bmcr & BMCR_SPEED1000)
175 phy->speed = SPEED_1000; 212 phy->speed = SPEED_1000;
176 } else if (acsr & ACSR_SPEED_100BASET) 213 else if (bmcr & BMCR_SPEED100)
177 phy->speed = SPEED_100; 214 phy->speed = SPEED_100;
178 else 215 else
179 phy->speed = SPEED_10; 216 phy->speed = SPEED_10;
180 phy->pause = 0;
181 }
182 /* On non-aneg, we assume what we put in BMCR is the speed,
183 * though magic-aneg shouldn't prevent this case from occurring
184 */
185 217
218 phy->pause = phy->asym_pause = 0;
219 }
186 return 0; 220 return 0;
187} 221}
188 222
189static int genmii_read_link(struct mii_phy *phy) 223/* Generic implementation for most 10/100/1000 PHYs */
224static struct mii_phy_ops generic_phy_ops = {
225 .setup_aneg = genmii_setup_aneg,
226 .setup_forced = genmii_setup_forced,
227 .poll_link = genmii_poll_link,
228 .read_link = genmii_read_link
229};
230
231static struct mii_phy_def genmii_phy_def = {
232 .phy_id = 0x00000000,
233 .phy_id_mask = 0x00000000,
234 .name = "Generic MII",
235 .ops = &generic_phy_ops
236};
237
238/* CIS8201 */
239#define MII_CIS8201_EPCR 0x17
240#define EPCR_MODE_MASK 0x3000
241#define EPCR_GMII_MODE 0x0000
242#define EPCR_RGMII_MODE 0x1000
243#define EPCR_TBI_MODE 0x2000
244#define EPCR_RTBI_MODE 0x3000
245
246static int cis8201_init(struct mii_phy *phy)
190{ 247{
191 u16 lpa; 248 int epcr;
192 249
193 if (phy->autoneg) { 250 epcr = phy_read(phy, MII_CIS8201_EPCR);
194 lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE); 251 if (epcr < 0)
252 return epcr;
195 253
196 phy->speed = SPEED_10; 254 epcr &= ~EPCR_MODE_MASK;
197 phy->duplex = DUPLEX_HALF;
198 phy->pause = 0;
199 255
200 if (lpa & (LPA_100FULL | LPA_100HALF)) { 256 switch (phy->mode) {
201 phy->speed = SPEED_100; 257 case PHY_MODE_TBI:
202 if (lpa & LPA_100FULL) 258 epcr |= EPCR_TBI_MODE;
203 phy->duplex = DUPLEX_FULL; 259 break;
204 } else if (lpa & LPA_10FULL) 260 case PHY_MODE_RTBI:
205 phy->duplex = DUPLEX_FULL; 261 epcr |= EPCR_RTBI_MODE;
262 break;
263 case PHY_MODE_GMII:
264 epcr |= EPCR_GMII_MODE;
265 break;
266 case PHY_MODE_RGMII:
267 default:
268 epcr |= EPCR_RGMII_MODE;
206 } 269 }
207 /* On non-aneg, we assume what we put in BMCR is the speed, 270
208 * though magic-aneg shouldn't prevent this case from occurring 271 phy_write(phy, MII_CIS8201_EPCR, epcr);
209 */
210 272
211 return 0; 273 return 0;
212} 274}
213 275
214#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
215 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
216 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII)
217#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
218 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)
219
220/* CIS8201 phy ops */
221static struct mii_phy_ops cis8201_phy_ops = { 276static struct mii_phy_ops cis8201_phy_ops = {
222 init:cis8201_init, 277 .init = cis8201_init,
223 setup_aneg:genmii_setup_aneg, 278 .setup_aneg = genmii_setup_aneg,
224 setup_forced:genmii_setup_forced, 279 .setup_forced = genmii_setup_forced,
225 poll_link:genmii_poll_link, 280 .poll_link = genmii_poll_link,
226 read_link:cis8201_read_link 281 .read_link = genmii_read_link
227};
228
229/* Generic implementation for most 10/100 PHYs */
230static struct mii_phy_ops generic_phy_ops = {
231 setup_aneg:genmii_setup_aneg,
232 setup_forced:genmii_setup_forced,
233 poll_link:genmii_poll_link,
234 read_link:genmii_read_link
235}; 282};
236 283
237static struct mii_phy_def cis8201_phy_def = { 284static struct mii_phy_def cis8201_phy_def = {
238 phy_id:0x000fc410, 285 .phy_id = 0x000fc410,
239 phy_id_mask:0x000ffff0, 286 .phy_id_mask = 0x000ffff0,
240 name:"CIS8201 Gigabit Ethernet", 287 .name = "CIS8201 Gigabit Ethernet",
241 features:MII_GBIT_FEATURES, 288 .ops = &cis8201_phy_ops
242 magic_aneg:0,
243 ops:&cis8201_phy_ops
244};
245
246static struct mii_phy_def genmii_phy_def = {
247 phy_id:0x00000000,
248 phy_id_mask:0x00000000,
249 name:"Generic MII",
250 features:MII_BASIC_FEATURES,
251 magic_aneg:0,
252 ops:&generic_phy_ops
253}; 289};
254 290
255static struct mii_phy_def *mii_phy_table[] = { 291static struct mii_phy_def *mii_phy_table[] = {
@@ -258,39 +294,60 @@ static struct mii_phy_def *mii_phy_table[] = {
258 NULL 294 NULL
259}; 295};
260 296
261int mii_phy_probe(struct mii_phy *phy, int mii_id) 297int mii_phy_probe(struct mii_phy *phy, int address)
262{ 298{
263 int rc;
264 u32 id;
265 struct mii_phy_def *def; 299 struct mii_phy_def *def;
266 int i; 300 int i;
301 u32 id;
267 302
268 phy->autoneg = 0; 303 phy->autoneg = AUTONEG_DISABLE;
269 phy->advertising = 0; 304 phy->advertising = 0;
270 phy->mii_id = mii_id; 305 phy->address = address;
271 phy->speed = 0; 306 phy->speed = SPEED_10;
272 phy->duplex = 0; 307 phy->duplex = DUPLEX_HALF;
273 phy->pause = 0; 308 phy->pause = phy->asym_pause = 0;
274 309
275 /* Take PHY out of isloate mode and reset it. */ 310 /* Take PHY out of isolate mode and reset it. */
276 rc = reset_one_mii_phy(phy, mii_id); 311 if (mii_reset_phy(phy))
277 if (rc)
278 return -ENODEV; 312 return -ENODEV;
279 313
280 /* Read ID and find matching entry */ 314 /* Read ID and find matching entry */
281 id = (phy_read(phy, MII_PHYSID1) << 16 | phy_read(phy, MII_PHYSID2)) 315 id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
282 & 0xfffffff0;
283 for (i = 0; (def = mii_phy_table[i]) != NULL; i++) 316 for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
284 if ((id & def->phy_id_mask) == def->phy_id) 317 if ((id & def->phy_id_mask) == def->phy_id)
285 break; 318 break;
286 /* Should never be NULL (we have a generic entry), but... */ 319 /* Should never be NULL (we have a generic entry), but... */
287 if (def == NULL) 320 if (!def)
288 return -ENODEV; 321 return -ENODEV;
289 322
290 phy->def = def; 323 phy->def = def;
291 324
325 /* Determine PHY features if needed */
326 phy->features = def->features;
327 if (!phy->features) {
328 u16 bmsr = phy_read(phy, MII_BMSR);
329 if (bmsr & BMSR_ANEGCAPABLE)
330 phy->features |= SUPPORTED_Autoneg;
331 if (bmsr & BMSR_10HALF)
332 phy->features |= SUPPORTED_10baseT_Half;
333 if (bmsr & BMSR_10FULL)
334 phy->features |= SUPPORTED_10baseT_Full;
335 if (bmsr & BMSR_100HALF)
336 phy->features |= SUPPORTED_100baseT_Half;
337 if (bmsr & BMSR_100FULL)
338 phy->features |= SUPPORTED_100baseT_Full;
339 if (bmsr & BMSR_ESTATEN) {
340 u16 esr = phy_read(phy, MII_ESTATUS);
341 if (esr & ESTATUS_1000_TFULL)
342 phy->features |= SUPPORTED_1000baseT_Full;
343 if (esr & ESTATUS_1000_THALF)
344 phy->features |= SUPPORTED_1000baseT_Half;
345 }
346 phy->features |= SUPPORTED_MII;
347 }
348
292 /* Setup default advertising */ 349 /* Setup default advertising */
293 phy->advertising = def->features; 350 phy->advertising = phy->features;
294 351
295 return 0; 352 return 0;
296} 353}
diff --git a/drivers/net/ibm_emac/ibm_emac_phy.h b/drivers/net/ibm_emac/ibm_emac_phy.h
index 61afbea96563..a70e0fea54c4 100644
--- a/drivers/net/ibm_emac/ibm_emac_phy.h
+++ b/drivers/net/ibm_emac/ibm_emac_phy.h
@@ -1,65 +1,25 @@
1
2/* 1/*
3 * ibm_emac_phy.h 2 * drivers/net/ibm_emac/ibm_emac_phy.h
4 *
5 * 3 *
6 * Benjamin Herrenschmidt <benh@kernel.crashing.org> 4 * Driver for PowerPC 4xx on-chip ethernet controller, PHY support
7 * February 2003
8 * 5 *
9 * This program is free software; you can redistribute it and/or modify it 6 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
10 * under the terms of the GNU General Public License as published by the 7 * February 2003
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 * 8 *
14 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 9 * Minor additions by Eugene Surovegin <ebs@ebshome.net>, 2004
15 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
20 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 *
25 * You should have received a copy of the GNU General Public License along
26 * with this program; if not, write to the Free Software Foundation, Inc.,
27 * 675 Mass Ave, Cambridge, MA 02139, USA.
28 * 10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
29 * 15 *
30 * This file basically duplicates sungem_phy.{c,h} with different PHYs 16 * This file basically duplicates sungem_phy.{c,h} with different PHYs
31 * supported. I'm looking into merging that in a single mii layer more 17 * supported. I'm looking into merging that in a single mii layer more
32 * flexible than mii.c 18 * flexible than mii.c
33 */ 19 */
34 20
35#ifndef _IBM_EMAC_PHY_H_ 21#ifndef _IBM_OCP_PHY_H_
36#define _IBM_EMAC_PHY_H_ 22#define _IBM_OCP_PHY_H_
37
38/*
39 * PHY mode settings
40 * Used for multi-mode capable PHYs
41 */
42#define PHY_MODE_NA 0
43#define PHY_MODE_MII 1
44#define PHY_MODE_RMII 2
45#define PHY_MODE_SMII 3
46#define PHY_MODE_RGMII 4
47#define PHY_MODE_TBI 5
48#define PHY_MODE_GMII 6
49#define PHY_MODE_RTBI 7
50#define PHY_MODE_SGMII 8
51
52/*
53 * PHY specific registers/values
54 */
55
56/* CIS8201 */
57#define MII_CIS8201_EPCR 0x17
58#define EPCR_MODE_MASK 0x3000
59#define EPCR_GMII_MODE 0x0000
60#define EPCR_RGMII_MODE 0x1000
61#define EPCR_TBI_MODE 0x2000
62#define EPCR_RTBI_MODE 0x3000
63 23
64struct mii_phy; 24struct mii_phy;
65 25
@@ -77,7 +37,8 @@ struct mii_phy_ops {
77struct mii_phy_def { 37struct mii_phy_def {
78 u32 phy_id; /* Concatenated ID1 << 16 | ID2 */ 38 u32 phy_id; /* Concatenated ID1 << 16 | ID2 */
79 u32 phy_id_mask; /* Significant bits */ 39 u32 phy_id_mask; /* Significant bits */
80 u32 features; /* Ethtool SUPPORTED_* defines */ 40 u32 features; /* Ethtool SUPPORTED_* defines or
41 0 for autodetect */
81 int magic_aneg; /* Autoneg does all speed test for us */ 42 int magic_aneg; /* Autoneg does all speed test for us */
82 const char *name; 43 const char *name;
83 const struct mii_phy_ops *ops; 44 const struct mii_phy_ops *ops;
@@ -86,8 +47,11 @@ struct mii_phy_def {
86/* An instance of a PHY, partially borrowed from mii_if_info */ 47/* An instance of a PHY, partially borrowed from mii_if_info */
87struct mii_phy { 48struct mii_phy {
88 struct mii_phy_def *def; 49 struct mii_phy_def *def;
89 int advertising; 50 u32 advertising; /* Ethtool ADVERTISED_* defines */
90 int mii_id; 51 u32 features; /* Copied from mii_phy_def.features
52 or determined automaticaly */
53 int address; /* PHY address */
54 int mode; /* PHY mode */
91 55
92 /* 1: autoneg enabled, 0: disabled */ 56 /* 1: autoneg enabled, 0: disabled */
93 int autoneg; 57 int autoneg;
@@ -98,40 +62,19 @@ struct mii_phy {
98 int speed; 62 int speed;
99 int duplex; 63 int duplex;
100 int pause; 64 int pause;
101 65 int asym_pause;
102 /* PHY mode - if needed */
103 int mode;
104 66
105 /* Provided by host chip */ 67 /* Provided by host chip */
106 struct net_device *dev; 68 struct net_device *dev;
107 int (*mdio_read) (struct net_device * dev, int mii_id, int reg); 69 int (*mdio_read) (struct net_device * dev, int addr, int reg);
108 void (*mdio_write) (struct net_device * dev, int mii_id, int reg, 70 void (*mdio_write) (struct net_device * dev, int addr, int reg,
109 int val); 71 int val);
110}; 72};
111 73
112/* Pass in a struct mii_phy with dev, mdio_read and mdio_write 74/* Pass in a struct mii_phy with dev, mdio_read and mdio_write
113 * filled, the remaining fields will be filled on return 75 * filled, the remaining fields will be filled on return
114 */ 76 */
115extern int mii_phy_probe(struct mii_phy *phy, int mii_id); 77int mii_phy_probe(struct mii_phy *phy, int address);
116 78int mii_reset_phy(struct mii_phy *phy);
117static inline int __phy_read(struct mii_phy *phy, int id, int reg)
118{
119 return phy->mdio_read(phy->dev, id, reg);
120}
121
122static inline void __phy_write(struct mii_phy *phy, int id, int reg, int val)
123{
124 phy->mdio_write(phy->dev, id, reg, val);
125}
126
127static inline int phy_read(struct mii_phy *phy, int reg)
128{
129 return phy->mdio_read(phy->dev, phy->mii_id, reg);
130}
131
132static inline void phy_write(struct mii_phy *phy, int reg, int val)
133{
134 phy->mdio_write(phy->dev, phy->mii_id, reg, val);
135}
136 79
137#endif /* _IBM_EMAC_PHY_H_ */ 80#endif /* _IBM_OCP_PHY_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.c b/drivers/net/ibm_emac/ibm_emac_rgmii.c
new file mode 100644
index 000000000000..f0b1ffb2dbbf
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.c
@@ -0,0 +1,201 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_rgmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Matt Porter <mporter@kernel.crashing.org>
11 * Copyright 2004 MontaVista Software, Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/ethtool.h>
22#include <asm/io.h>
23
24#include "ibm_emac_core.h"
25#include "ibm_emac_debug.h"
26
27/* RGMIIx_FER */
28#define RGMII_FER_MASK(idx) (0x7 << ((idx) * 4))
29#define RGMII_FER_RTBI(idx) (0x4 << ((idx) * 4))
30#define RGMII_FER_RGMII(idx) (0x5 << ((idx) * 4))
31#define RGMII_FER_TBI(idx) (0x6 << ((idx) * 4))
32#define RGMII_FER_GMII(idx) (0x7 << ((idx) * 4))
33
34/* RGMIIx_SSR */
35#define RGMII_SSR_MASK(idx) (0x7 << ((idx) * 8))
36#define RGMII_SSR_100(idx) (0x2 << ((idx) * 8))
37#define RGMII_SSR_1000(idx) (0x4 << ((idx) * 8))
38
39/* RGMII bridge supports only GMII/TBI and RGMII/RTBI PHYs */
40static inline int rgmii_valid_mode(int phy_mode)
41{
42 return phy_mode == PHY_MODE_GMII ||
43 phy_mode == PHY_MODE_RGMII ||
44 phy_mode == PHY_MODE_TBI ||
45 phy_mode == PHY_MODE_RTBI;
46}
47
48static inline const char *rgmii_mode_name(int mode)
49{
50 switch (mode) {
51 case PHY_MODE_RGMII:
52 return "RGMII";
53 case PHY_MODE_TBI:
54 return "TBI";
55 case PHY_MODE_GMII:
56 return "GMII";
57 case PHY_MODE_RTBI:
58 return "RTBI";
59 default:
60 BUG();
61 }
62}
63
64static inline u32 rgmii_mode_mask(int mode, int input)
65{
66 switch (mode) {
67 case PHY_MODE_RGMII:
68 return RGMII_FER_RGMII(input);
69 case PHY_MODE_TBI:
70 return RGMII_FER_TBI(input);
71 case PHY_MODE_GMII:
72 return RGMII_FER_GMII(input);
73 case PHY_MODE_RTBI:
74 return RGMII_FER_RTBI(input);
75 default:
76 BUG();
77 }
78}
79
80static int __init rgmii_init(struct ocp_device *ocpdev, int input, int mode)
81{
82 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
83 struct rgmii_regs *p;
84
85 RGMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, mode);
86
87 if (!dev) {
88 dev = kzalloc(sizeof(struct ibm_ocp_rgmii), GFP_KERNEL);
89 if (!dev) {
90 printk(KERN_ERR
91 "rgmii%d: couldn't allocate device structure!\n",
92 ocpdev->def->index);
93 return -ENOMEM;
94 }
95
96 p = (struct rgmii_regs *)ioremap(ocpdev->def->paddr,
97 sizeof(struct rgmii_regs));
98 if (!p) {
99 printk(KERN_ERR
100 "rgmii%d: could not ioremap device registers!\n",
101 ocpdev->def->index);
102 kfree(dev);
103 return -ENOMEM;
104 }
105
106 dev->base = p;
107 ocp_set_drvdata(ocpdev, dev);
108
109 /* Disable all inputs by default */
110 out_be32(&p->fer, 0);
111 } else
112 p = dev->base;
113
114 /* Enable this input */
115 out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input));
116
117 printk(KERN_NOTICE "rgmii%d: input %d in %s mode\n",
118 ocpdev->def->index, input, rgmii_mode_name(mode));
119
120 ++dev->users;
121 return 0;
122}
123
124int __init rgmii_attach(void *emac)
125{
126 struct ocp_enet_private *dev = emac;
127 struct ocp_func_emac_data *emacdata = dev->def->additions;
128
129 /* Check if we need to attach to a RGMII */
130 if (emacdata->rgmii_idx >= 0 && rgmii_valid_mode(emacdata->phy_mode)) {
131 dev->rgmii_input = emacdata->rgmii_mux;
132 dev->rgmii_dev =
133 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_RGMII,
134 emacdata->rgmii_idx);
135 if (!dev->rgmii_dev) {
136 printk(KERN_ERR "emac%d: unknown rgmii%d!\n",
137 dev->def->index, emacdata->rgmii_idx);
138 return -ENODEV;
139 }
140 if (rgmii_init
141 (dev->rgmii_dev, dev->rgmii_input, emacdata->phy_mode)) {
142 printk(KERN_ERR
143 "emac%d: rgmii%d initialization failed!\n",
144 dev->def->index, emacdata->rgmii_idx);
145 return -ENODEV;
146 }
147 }
148 return 0;
149}
150
151void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
152{
153 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
154 u32 ssr = in_be32(&dev->base->ssr) & ~RGMII_SSR_MASK(input);
155
156 RGMII_DBG("%d: speed(%d, %d)" NL, ocpdev->def->index, input, speed);
157
158 if (speed == SPEED_1000)
159 ssr |= RGMII_SSR_1000(input);
160 else if (speed == SPEED_100)
161 ssr |= RGMII_SSR_100(input);
162
163 out_be32(&dev->base->ssr, ssr);
164}
165
166void __exit __rgmii_fini(struct ocp_device *ocpdev, int input)
167{
168 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
169 BUG_ON(!dev || dev->users == 0);
170
171 RGMII_DBG("%d: fini(%d)" NL, ocpdev->def->index, input);
172
173 /* Disable this input */
174 out_be32(&dev->base->fer,
175 in_be32(&dev->base->fer) & ~RGMII_FER_MASK(input));
176
177 if (!--dev->users) {
178 /* Free everything if this is the last user */
179 ocp_set_drvdata(ocpdev, NULL);
180 iounmap((void *)dev->base);
181 kfree(dev);
182 }
183}
184
185int __rgmii_get_regs_len(struct ocp_device *ocpdev)
186{
187 return sizeof(struct emac_ethtool_regs_subhdr) +
188 sizeof(struct rgmii_regs);
189}
190
191void *rgmii_dump_regs(struct ocp_device *ocpdev, void *buf)
192{
193 struct ibm_ocp_rgmii *dev = ocp_get_drvdata(ocpdev);
194 struct emac_ethtool_regs_subhdr *hdr = buf;
195 struct rgmii_regs *regs = (struct rgmii_regs *)(hdr + 1);
196
197 hdr->version = 0;
198 hdr->index = ocpdev->def->index;
199 memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs));
200 return regs + 1;
201}
diff --git a/drivers/net/ibm_emac/ibm_emac_rgmii.h b/drivers/net/ibm_emac/ibm_emac_rgmii.h
index 49f188f4ea6e..a1ffb8a44fff 100644
--- a/drivers/net/ibm_emac/ibm_emac_rgmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_rgmii.h
@@ -1,5 +1,7 @@
1/* 1/*
2 * Defines for the IBM RGMII bridge 2 * drivers/net/ibm_emac/ibm_emac_rgmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, RGMII bridge support.
3 * 5 *
4 * Based on ocp_zmii.h/ibm_emac_zmii.h 6 * Based on ocp_zmii.h/ibm_emac_zmii.h
5 * Armin Kuster akuster@mvista.com 7 * Armin Kuster akuster@mvista.com
@@ -7,6 +9,9 @@
7 * Copyright 2004 MontaVista Software, Inc. 9 * Copyright 2004 MontaVista Software, Inc.
8 * Matt Porter <mporter@kernel.crashing.org> 10 * Matt Porter <mporter@kernel.crashing.org>
9 * 11 *
12 * Copyright (c) 2004, 2005 Zultys Technologies.
13 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
14 *
10 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 17 * Free Software Foundation; either version 2 of the License, or (at your
@@ -19,47 +24,42 @@
19#include <linux/config.h> 24#include <linux/config.h>
20 25
21/* RGMII bridge */ 26/* RGMII bridge */
22typedef struct rgmii_regs { 27struct rgmii_regs {
23 u32 fer; /* Function enable register */ 28 u32 fer; /* Function enable register */
24 u32 ssr; /* Speed select register */ 29 u32 ssr; /* Speed select register */
25} rgmii_t; 30};
26
27#define RGMII_INPUTS 4
28 31
29/* RGMII device */ 32/* RGMII device */
30struct ibm_ocp_rgmii { 33struct ibm_ocp_rgmii {
31 struct rgmii_regs *base; 34 struct rgmii_regs *base;
32 int mode[RGMII_INPUTS];
33 int users; /* number of EMACs using this RGMII bridge */ 35 int users; /* number of EMACs using this RGMII bridge */
34}; 36};
35 37
36/* Fuctional Enable Reg */ 38#ifdef CONFIG_IBM_EMAC_RGMII
37#define RGMII_FER_MASK(x) (0x00000007 << (4*x)) 39int rgmii_attach(void *emac) __init;
38#define RGMII_RTBI 0x00000004
39#define RGMII_RGMII 0x00000005
40#define RGMII_TBI 0x00000006
41#define RGMII_GMII 0x00000007
42
43/* Speed Selection reg */
44 40
45#define RGMII_SP2_100 0x00000002 41void __rgmii_fini(struct ocp_device *ocpdev, int input) __exit;
46#define RGMII_SP2_1000 0x00000004 42static inline void rgmii_fini(struct ocp_device *ocpdev, int input)
47#define RGMII_SP3_100 0x00000200 43{
48#define RGMII_SP3_1000 0x00000400 44 if (ocpdev)
45 __rgmii_fini(ocpdev, input);
46}
49 47
50#define RGMII_MII2_SPDMASK 0x00000007 48void rgmii_set_speed(struct ocp_device *ocpdev, int input, int speed);
51#define RGMII_MII3_SPDMASK 0x00000700
52 49
53#define RGMII_MII2_100MB RGMII_SP2_100 & ~RGMII_SP2_1000 50int __rgmii_get_regs_len(struct ocp_device *ocpdev);
54#define RGMII_MII2_1000MB RGMII_SP2_1000 & ~RGMII_SP2_100 51static inline int rgmii_get_regs_len(struct ocp_device *ocpdev)
55#define RGMII_MII2_10MB ~(RGMII_SP2_100 | RGMII_SP2_1000) 52{
56#define RGMII_MII3_100MB RGMII_SP3_100 & ~RGMII_SP3_1000 53 return ocpdev ? __rgmii_get_regs_len(ocpdev) : 0;
57#define RGMII_MII3_1000MB RGMII_SP3_1000 & ~RGMII_SP3_100 54}
58#define RGMII_MII3_10MB ~(RGMII_SP3_100 | RGMII_SP3_1000)
59 55
60#define RTBI 0 56void *rgmii_dump_regs(struct ocp_device *ocpdev, void *buf);
61#define RGMII 1 57#else
62#define TBI 2 58# define rgmii_attach(x) 0
63#define GMII 3 59# define rgmii_fini(x,y) ((void)0)
60# define rgmii_set_speed(x,y,z) ((void)0)
61# define rgmii_get_regs_len(x) 0
62# define rgmii_dump_regs(x,buf) (buf)
63#endif /* !CONFIG_IBM_EMAC_RGMII */
64 64
65#endif /* _IBM_EMAC_RGMII_H_ */ 65#endif /* _IBM_EMAC_RGMII_H_ */
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.c b/drivers/net/ibm_emac/ibm_emac_tah.c
new file mode 100644
index 000000000000..af08afc22f9f
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_tah.c
@@ -0,0 +1,111 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_tah.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
5 *
6 * Copyright 2004 MontaVista Software, Inc.
7 * Matt Porter <mporter@kernel.crashing.org>
8 *
9 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 */
16#include <linux/config.h>
17#include <asm/io.h>
18
19#include "ibm_emac_core.h"
20
21static int __init tah_init(struct ocp_device *ocpdev)
22{
23 struct tah_regs *p;
24
25 if (ocp_get_drvdata(ocpdev)) {
26 printk(KERN_ERR "tah%d: already in use!\n", ocpdev->def->index);
27 return -EBUSY;
28 }
29
30 /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */
31 p = (struct tah_regs *)ioremap(ocpdev->def->paddr, sizeof(*p));
32 if (!p) {
33 printk(KERN_ERR "tah%d: could not ioremap device registers!\n",
34 ocpdev->def->index);
35 return -ENOMEM;
36 }
37 ocp_set_drvdata(ocpdev, p);
38 __tah_reset(ocpdev);
39
40 return 0;
41}
42
43int __init tah_attach(void *emac)
44{
45 struct ocp_enet_private *dev = emac;
46 struct ocp_func_emac_data *emacdata = dev->def->additions;
47
48 /* Check if we need to attach to a TAH */
49 if (emacdata->tah_idx >= 0) {
50 dev->tah_dev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_TAH,
51 emacdata->tah_idx);
52 if (!dev->tah_dev) {
53 printk(KERN_ERR "emac%d: unknown tah%d!\n",
54 dev->def->index, emacdata->tah_idx);
55 return -ENODEV;
56 }
57 if (tah_init(dev->tah_dev)) {
58 printk(KERN_ERR
59 "emac%d: tah%d initialization failed!\n",
60 dev->def->index, emacdata->tah_idx);
61 return -ENODEV;
62 }
63 }
64 return 0;
65}
66
67void __exit __tah_fini(struct ocp_device *ocpdev)
68{
69 struct tah_regs *p = ocp_get_drvdata(ocpdev);
70 BUG_ON(!p);
71 ocp_set_drvdata(ocpdev, NULL);
72 iounmap((void *)p);
73}
74
75void __tah_reset(struct ocp_device *ocpdev)
76{
77 struct tah_regs *p = ocp_get_drvdata(ocpdev);
78 int n;
79
80 /* Reset TAH */
81 out_be32(&p->mr, TAH_MR_SR);
82 n = 100;
83 while ((in_be32(&p->mr) & TAH_MR_SR) && n)
84 --n;
85
86 if (unlikely(!n))
87 printk(KERN_ERR "tah%d: reset timeout\n", ocpdev->def->index);
88
89 /* 10KB TAH TX FIFO accomodates the max MTU of 9000 */
90 out_be32(&p->mr,
91 TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
92 TAH_MR_DIG);
93}
94
95int __tah_get_regs_len(struct ocp_device *ocpdev)
96{
97 return sizeof(struct emac_ethtool_regs_subhdr) +
98 sizeof(struct tah_regs);
99}
100
101void *tah_dump_regs(struct ocp_device *ocpdev, void *buf)
102{
103 struct tah_regs *dev = ocp_get_drvdata(ocpdev);
104 struct emac_ethtool_regs_subhdr *hdr = buf;
105 struct tah_regs *regs = (struct tah_regs *)(hdr + 1);
106
107 hdr->version = 0;
108 hdr->index = ocpdev->def->index;
109 memcpy_fromio(regs, dev, sizeof(struct tah_regs));
110 return regs + 1;
111}
diff --git a/drivers/net/ibm_emac/ibm_emac_tah.h b/drivers/net/ibm_emac/ibm_emac_tah.h
index ecfc69805521..9299b5dd7eb1 100644
--- a/drivers/net/ibm_emac/ibm_emac_tah.h
+++ b/drivers/net/ibm_emac/ibm_emac_tah.h
@@ -1,9 +1,13 @@
1/* 1/*
2 * Defines for the IBM TAH 2 * drivers/net/ibm_emac/ibm_emac_tah.h
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, TAH support.
3 * 5 *
4 * Copyright 2004 MontaVista Software, Inc. 6 * Copyright 2004 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org> 7 * Matt Porter <mporter@kernel.crashing.org>
6 * 8 *
9 * Copyright (c) 2005 Eugene Surovegin <ebs@ebshome.net>
10 *
7 * This program is free software; you can redistribute it and/or modify it 11 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the 12 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your 13 * Free Software Foundation; either version 2 of the License, or (at your
@@ -13,36 +17,72 @@
13#ifndef _IBM_EMAC_TAH_H 17#ifndef _IBM_EMAC_TAH_H
14#define _IBM_EMAC_TAH_H 18#define _IBM_EMAC_TAH_H
15 19
20#include <linux/config.h>
21#include <linux/init.h>
22#include <asm/ocp.h>
23
16/* TAH */ 24/* TAH */
17typedef struct tah_regs { 25struct tah_regs {
18 u32 tah_revid; 26 u32 revid;
19 u32 pad[3]; 27 u32 pad[3];
20 u32 tah_mr; 28 u32 mr;
21 u32 tah_ssr0; 29 u32 ssr0;
22 u32 tah_ssr1; 30 u32 ssr1;
23 u32 tah_ssr2; 31 u32 ssr2;
24 u32 tah_ssr3; 32 u32 ssr3;
25 u32 tah_ssr4; 33 u32 ssr4;
26 u32 tah_ssr5; 34 u32 ssr5;
27 u32 tah_tsr; 35 u32 tsr;
28} tah_t; 36};
29 37
30/* TAH engine */ 38/* TAH engine */
31#define TAH_MR_CVR 0x80000000 39#define TAH_MR_CVR 0x80000000
32#define TAH_MR_SR 0x40000000 40#define TAH_MR_SR 0x40000000
33#define TAH_MR_ST_256 0x01000000 41#define TAH_MR_ST_256 0x01000000
34#define TAH_MR_ST_512 0x02000000 42#define TAH_MR_ST_512 0x02000000
35#define TAH_MR_ST_768 0x03000000 43#define TAH_MR_ST_768 0x03000000
36#define TAH_MR_ST_1024 0x04000000 44#define TAH_MR_ST_1024 0x04000000
37#define TAH_MR_ST_1280 0x05000000 45#define TAH_MR_ST_1280 0x05000000
38#define TAH_MR_ST_1536 0x06000000 46#define TAH_MR_ST_1536 0x06000000
39#define TAH_MR_TFS_16KB 0x00000000 47#define TAH_MR_TFS_16KB 0x00000000
40#define TAH_MR_TFS_2KB 0x00200000 48#define TAH_MR_TFS_2KB 0x00200000
41#define TAH_MR_TFS_4KB 0x00400000 49#define TAH_MR_TFS_4KB 0x00400000
42#define TAH_MR_TFS_6KB 0x00600000 50#define TAH_MR_TFS_6KB 0x00600000
43#define TAH_MR_TFS_8KB 0x00800000 51#define TAH_MR_TFS_8KB 0x00800000
44#define TAH_MR_TFS_10KB 0x00a00000 52#define TAH_MR_TFS_10KB 0x00a00000
45#define TAH_MR_DTFP 0x00100000 53#define TAH_MR_DTFP 0x00100000
46#define TAH_MR_DIG 0x00080000 54#define TAH_MR_DIG 0x00080000
55
56#ifdef CONFIG_IBM_EMAC_TAH
57int tah_attach(void *emac) __init;
58
59void __tah_fini(struct ocp_device *ocpdev) __exit;
60static inline void tah_fini(struct ocp_device *ocpdev)
61{
62 if (ocpdev)
63 __tah_fini(ocpdev);
64}
65
66void __tah_reset(struct ocp_device *ocpdev);
67static inline void tah_reset(struct ocp_device *ocpdev)
68{
69 if (ocpdev)
70 __tah_reset(ocpdev);
71}
72
73int __tah_get_regs_len(struct ocp_device *ocpdev);
74static inline int tah_get_regs_len(struct ocp_device *ocpdev)
75{
76 return ocpdev ? __tah_get_regs_len(ocpdev) : 0;
77}
78
79void *tah_dump_regs(struct ocp_device *ocpdev, void *buf);
80#else
81# define tah_attach(x) 0
82# define tah_fini(x) ((void)0)
83# define tah_reset(x) ((void)0)
84# define tah_get_regs_len(x) 0
85# define tah_dump_regs(x,buf) (buf)
86#endif /* !CONFIG_IBM_EMAC_TAH */
47 87
48#endif /* _IBM_EMAC_TAH_H */ 88#endif /* _IBM_EMAC_TAH_H */
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.c b/drivers/net/ibm_emac/ibm_emac_zmii.c
new file mode 100644
index 000000000000..35c1185079ed
--- /dev/null
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.c
@@ -0,0 +1,255 @@
1/*
2 * drivers/net/ibm_emac/ibm_emac_zmii.c
3 *
4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 *
6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 *
9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
11 * Copyright 2001 MontaVista Softare Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#include <linux/config.h>
20#include <linux/kernel.h>
21#include <linux/ethtool.h>
22#include <asm/io.h>
23
24#include "ibm_emac_core.h"
25#include "ibm_emac_debug.h"
26
27/* ZMIIx_FER */
28#define ZMII_FER_MDI(idx) (0x80000000 >> ((idx) * 4))
29#define ZMII_FER_MDI_ALL (ZMII_FER_MDI(0) | ZMII_FER_MDI(1) | \
30 ZMII_FER_MDI(2) | ZMII_FER_MDI(3))
31
32#define ZMII_FER_SMII(idx) (0x40000000 >> ((idx) * 4))
33#define ZMII_FER_RMII(idx) (0x20000000 >> ((idx) * 4))
34#define ZMII_FER_MII(idx) (0x10000000 >> ((idx) * 4))
35
36/* ZMIIx_SSR */
37#define ZMII_SSR_SCI(idx) (0x40000000 >> ((idx) * 4))
38#define ZMII_SSR_FSS(idx) (0x20000000 >> ((idx) * 4))
39#define ZMII_SSR_SP(idx) (0x10000000 >> ((idx) * 4))
40
41/* ZMII only supports MII, RMII and SMII
42 * we also support autodetection for backward compatibility
43 */
44static inline int zmii_valid_mode(int mode)
45{
46 return mode == PHY_MODE_MII ||
47 mode == PHY_MODE_RMII ||
48 mode == PHY_MODE_SMII ||
49 mode == PHY_MODE_NA;
50}
51
52static inline const char *zmii_mode_name(int mode)
53{
54 switch (mode) {
55 case PHY_MODE_MII:
56 return "MII";
57 case PHY_MODE_RMII:
58 return "RMII";
59 case PHY_MODE_SMII:
60 return "SMII";
61 default:
62 BUG();
63 }
64}
65
66static inline u32 zmii_mode_mask(int mode, int input)
67{
68 switch (mode) {
69 case PHY_MODE_MII:
70 return ZMII_FER_MII(input);
71 case PHY_MODE_RMII:
72 return ZMII_FER_RMII(input);
73 case PHY_MODE_SMII:
74 return ZMII_FER_SMII(input);
75 default:
76 return 0;
77 }
78}
79
80static int __init zmii_init(struct ocp_device *ocpdev, int input, int *mode)
81{
82 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
83 struct zmii_regs *p;
84
85 ZMII_DBG("%d: init(%d, %d)" NL, ocpdev->def->index, input, *mode);
86
87 if (!dev) {
88 dev = kzalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
89 if (!dev) {
90 printk(KERN_ERR
91 "zmii%d: couldn't allocate device structure!\n",
92 ocpdev->def->index);
93 return -ENOMEM;
94 }
95 dev->mode = PHY_MODE_NA;
96
97 p = (struct zmii_regs *)ioremap(ocpdev->def->paddr,
98 sizeof(struct zmii_regs));
99 if (!p) {
100 printk(KERN_ERR
101 "zmii%d: could not ioremap device registers!\n",
102 ocpdev->def->index);
103 kfree(dev);
104 return -ENOMEM;
105 }
106 dev->base = p;
107 ocp_set_drvdata(ocpdev, dev);
108
109 /* We may need FER value for autodetection later */
110 dev->fer_save = in_be32(&p->fer);
111
112 /* Disable all inputs by default */
113 out_be32(&p->fer, 0);
114 } else
115 p = dev->base;
116
117 if (!zmii_valid_mode(*mode)) {
118 /* Probably an EMAC connected to RGMII,
119 * but it still may need ZMII for MDIO
120 */
121 goto out;
122 }
123
124 /* Autodetect ZMII mode if not specified.
125 * This is only for backward compatibility with the old driver.
126 * Please, always specify PHY mode in your board port to avoid
127 * any surprises.
128 */
129 if (dev->mode == PHY_MODE_NA) {
130 if (*mode == PHY_MODE_NA) {
131 u32 r = dev->fer_save;
132
133 ZMII_DBG("%d: autodetecting mode, FER = 0x%08x" NL,
134 ocpdev->def->index, r);
135
136 if (r & (ZMII_FER_MII(0) | ZMII_FER_MII(1)))
137 dev->mode = PHY_MODE_MII;
138 else if (r & (ZMII_FER_RMII(0) | ZMII_FER_RMII(1)))
139 dev->mode = PHY_MODE_RMII;
140 else
141 dev->mode = PHY_MODE_SMII;
142 } else
143 dev->mode = *mode;
144
145 printk(KERN_NOTICE "zmii%d: bridge in %s mode\n",
146 ocpdev->def->index, zmii_mode_name(dev->mode));
147 } else {
148 /* All inputs must use the same mode */
149 if (*mode != PHY_MODE_NA && *mode != dev->mode) {
150 printk(KERN_ERR
151 "zmii%d: invalid mode %d specified for input %d\n",
152 ocpdev->def->index, *mode, input);
153 return -EINVAL;
154 }
155 }
156
157 /* Report back correct PHY mode,
158 * it may be used during PHY initialization.
159 */
160 *mode = dev->mode;
161
162 /* Enable this input */
163 out_be32(&p->fer, in_be32(&p->fer) | zmii_mode_mask(dev->mode, input));
164 out:
165 ++dev->users;
166 return 0;
167}
168
169int __init zmii_attach(void *emac)
170{
171 struct ocp_enet_private *dev = emac;
172 struct ocp_func_emac_data *emacdata = dev->def->additions;
173
174 if (emacdata->zmii_idx >= 0) {
175 dev->zmii_input = emacdata->zmii_mux;
176 dev->zmii_dev =
177 ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_ZMII,
178 emacdata->zmii_idx);
179 if (!dev->zmii_dev) {
180 printk(KERN_ERR "emac%d: unknown zmii%d!\n",
181 dev->def->index, emacdata->zmii_idx);
182 return -ENODEV;
183 }
184 if (zmii_init
185 (dev->zmii_dev, dev->zmii_input, &emacdata->phy_mode)) {
186 printk(KERN_ERR
187 "emac%d: zmii%d initialization failed!\n",
188 dev->def->index, emacdata->zmii_idx);
189 return -ENODEV;
190 }
191 }
192 return 0;
193}
194
195void __zmii_enable_mdio(struct ocp_device *ocpdev, int input)
196{
197 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
198 u32 fer = in_be32(&dev->base->fer) & ~ZMII_FER_MDI_ALL;
199
200 ZMII_DBG2("%d: mdio(%d)" NL, ocpdev->def->index, input);
201
202 out_be32(&dev->base->fer, fer | ZMII_FER_MDI(input));
203}
204
205void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed)
206{
207 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
208 u32 ssr = in_be32(&dev->base->ssr);
209
210 ZMII_DBG("%d: speed(%d, %d)" NL, ocpdev->def->index, input, speed);
211
212 if (speed == SPEED_100)
213 ssr |= ZMII_SSR_SP(input);
214 else
215 ssr &= ~ZMII_SSR_SP(input);
216
217 out_be32(&dev->base->ssr, ssr);
218}
219
220void __exit __zmii_fini(struct ocp_device *ocpdev, int input)
221{
222 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
223 BUG_ON(!dev || dev->users == 0);
224
225 ZMII_DBG("%d: fini(%d)" NL, ocpdev->def->index, input);
226
227 /* Disable this input */
228 out_be32(&dev->base->fer,
229 in_be32(&dev->base->fer) & ~zmii_mode_mask(dev->mode, input));
230
231 if (!--dev->users) {
232 /* Free everything if this is the last user */
233 ocp_set_drvdata(ocpdev, NULL);
234 iounmap((void *)dev->base);
235 kfree(dev);
236 }
237}
238
239int __zmii_get_regs_len(struct ocp_device *ocpdev)
240{
241 return sizeof(struct emac_ethtool_regs_subhdr) +
242 sizeof(struct zmii_regs);
243}
244
245void *zmii_dump_regs(struct ocp_device *ocpdev, void *buf)
246{
247 struct ibm_ocp_zmii *dev = ocp_get_drvdata(ocpdev);
248 struct emac_ethtool_regs_subhdr *hdr = buf;
249 struct zmii_regs *regs = (struct zmii_regs *)(hdr + 1);
250
251 hdr->version = 0;
252 hdr->index = ocpdev->def->index;
253 memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs));
254 return regs + 1;
255}
diff --git a/drivers/net/ibm_emac/ibm_emac_zmii.h b/drivers/net/ibm_emac/ibm_emac_zmii.h
index 6f6cd2a39e38..0bb26062c0ad 100644
--- a/drivers/net/ibm_emac/ibm_emac_zmii.h
+++ b/drivers/net/ibm_emac/ibm_emac_zmii.h
@@ -1,23 +1,27 @@
1/* 1/*
2 * ocp_zmii.h 2 * drivers/net/ibm_emac/ibm_emac_zmii.h
3 * 3 *
4 * Defines for the IBM ZMII bridge 4 * Driver for PowerPC 4xx on-chip ethernet controller, ZMII bridge support.
5 * 5 *
6 * Armin Kuster akuster@mvista.com 6 * Copyright (c) 2004, 2005 Zultys Technologies.
7 * Dec, 2001 7 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8 * 8 *
9 * Copyright 2001 MontaVista Softare Inc. 9 * Based on original work by
10 * Armin Kuster <akuster@mvista.com>
11 * Copyright 2001 MontaVista Softare Inc.
10 * 12 *
11 * This program is free software; you can redistribute it and/or modify it 13 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the 14 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your 15 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version. 16 * option) any later version.
17 *
15 */ 18 */
16
17#ifndef _IBM_EMAC_ZMII_H_ 19#ifndef _IBM_EMAC_ZMII_H_
18#define _IBM_EMAC_ZMII_H_ 20#define _IBM_EMAC_ZMII_H_
19 21
20#include <linux/config.h> 22#include <linux/config.h>
23#include <linux/init.h>
24#include <asm/ocp.h>
21 25
22/* ZMII bridge registers */ 26/* ZMII bridge registers */
23struct zmii_regs { 27struct zmii_regs {
@@ -26,68 +30,54 @@ struct zmii_regs {
26 u32 smiirs; /* SMII status reg */ 30 u32 smiirs; /* SMII status reg */
27}; 31};
28 32
29#define ZMII_INPUTS 4
30
31/* ZMII device */ 33/* ZMII device */
32struct ibm_ocp_zmii { 34struct ibm_ocp_zmii {
33 struct zmii_regs *base; 35 struct zmii_regs *base;
34 int mode[ZMII_INPUTS]; 36 int mode; /* subset of PHY_MODE_XXXX */
35 int users; /* number of EMACs using this ZMII bridge */ 37 int users; /* number of EMACs using this ZMII bridge */
38 u32 fer_save; /* FER value left by firmware */
36}; 39};
37 40
38/* Fuctional Enable Reg */ 41#ifdef CONFIG_IBM_EMAC_ZMII
39 42int zmii_attach(void *emac) __init;
40#define ZMII_FER_MASK(x) (0xf0000000 >> (4*x))
41
42#define ZMII_MDI0 0x80000000
43#define ZMII_SMII0 0x40000000
44#define ZMII_RMII0 0x20000000
45#define ZMII_MII0 0x10000000
46#define ZMII_MDI1 0x08000000
47#define ZMII_SMII1 0x04000000
48#define ZMII_RMII1 0x02000000
49#define ZMII_MII1 0x01000000
50#define ZMII_MDI2 0x00800000
51#define ZMII_SMII2 0x00400000
52#define ZMII_RMII2 0x00200000
53#define ZMII_MII2 0x00100000
54#define ZMII_MDI3 0x00080000
55#define ZMII_SMII3 0x00040000
56#define ZMII_RMII3 0x00020000
57#define ZMII_MII3 0x00010000
58 43
59/* Speed Selection reg */ 44void __zmii_fini(struct ocp_device *ocpdev, int input) __exit;
45static inline void zmii_fini(struct ocp_device *ocpdev, int input)
46{
47 if (ocpdev)
48 __zmii_fini(ocpdev, input);
49}
60 50
61#define ZMII_SCI0 0x40000000 51void __zmii_enable_mdio(struct ocp_device *ocpdev, int input);
62#define ZMII_FSS0 0x20000000 52static inline void zmii_enable_mdio(struct ocp_device *ocpdev, int input)
63#define ZMII_SP0 0x10000000 53{
64#define ZMII_SCI1 0x04000000 54 if (ocpdev)
65#define ZMII_FSS1 0x02000000 55 __zmii_enable_mdio(ocpdev, input);
66#define ZMII_SP1 0x01000000 56}
67#define ZMII_SCI2 0x00400000
68#define ZMII_FSS2 0x00200000
69#define ZMII_SP2 0x00100000
70#define ZMII_SCI3 0x00040000
71#define ZMII_FSS3 0x00020000
72#define ZMII_SP3 0x00010000
73 57
74#define ZMII_MII0_100MB ZMII_SP0 58void __zmii_set_speed(struct ocp_device *ocpdev, int input, int speed);
75#define ZMII_MII0_10MB ~ZMII_SP0 59static inline void zmii_set_speed(struct ocp_device *ocpdev, int input,
76#define ZMII_MII1_100MB ZMII_SP1 60 int speed)
77#define ZMII_MII1_10MB ~ZMII_SP1 61{
78#define ZMII_MII2_100MB ZMII_SP2 62 if (ocpdev)
79#define ZMII_MII2_10MB ~ZMII_SP2 63 __zmii_set_speed(ocpdev, input, speed);
80#define ZMII_MII3_100MB ZMII_SP3 64}
81#define ZMII_MII3_10MB ~ZMII_SP3
82 65
83/* SMII Status reg */ 66int __zmii_get_regs_len(struct ocp_device *ocpdev);
67static inline int zmii_get_regs_len(struct ocp_device *ocpdev)
68{
69 return ocpdev ? __zmii_get_regs_len(ocpdev) : 0;
70}
84 71
85#define ZMII_STS0 0xFF000000 /* EMAC0 smii status mask */ 72void *zmii_dump_regs(struct ocp_device *ocpdev, void *buf);
86#define ZMII_STS1 0x00FF0000 /* EMAC1 smii status mask */
87 73
88#define SMII 0 74#else
89#define RMII 1 75# define zmii_attach(x) 0
90#define MII 2 76# define zmii_fini(x,y) ((void)0)
91#define MDI 3 77# define zmii_enable_mdio(x,y) ((void)0)
78# define zmii_set_speed(x,y,z) ((void)0)
79# define zmii_get_regs_len(x) 0
80# define zmii_dump_regs(x,buf) (buf)
81#endif /* !CONFIG_IBM_EMAC_ZMII */
92 82
93#endif /* _IBM_EMAC_ZMII_H_ */ 83#endif /* _IBM_EMAC_ZMII_H_ */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index cbe9368a4d56..e5246f227c98 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -96,7 +96,7 @@ static void ibmveth_proc_unregister_driver(void);
96static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter); 96static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
97static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter); 97static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
98static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 98static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
99static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*); 99static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
100 100
101#ifdef CONFIG_PROC_FS 101#ifdef CONFIG_PROC_FS
102#define IBMVETH_PROC_DIR "net/ibmveth" 102#define IBMVETH_PROC_DIR "net/ibmveth"
@@ -181,6 +181,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
181 atomic_set(&pool->available, 0); 181 atomic_set(&pool->available, 0);
182 pool->producer_index = 0; 182 pool->producer_index = 0;
183 pool->consumer_index = 0; 183 pool->consumer_index = 0;
184 pool->active = 0;
184 185
185 return 0; 186 return 0;
186} 187}
@@ -236,7 +237,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
236 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc); 237 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
237 238
238 if(lpar_rc != H_Success) { 239 if(lpar_rc != H_Success) {
239 pool->free_map[free_index] = IBM_VETH_INVALID_MAP; 240 pool->free_map[free_index] = index;
240 pool->skbuff[index] = NULL; 241 pool->skbuff[index] = NULL;
241 pool->consumer_index--; 242 pool->consumer_index--;
242 dma_unmap_single(&adapter->vdev->dev, 243 dma_unmap_single(&adapter->vdev->dev,
@@ -255,37 +256,19 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
255 atomic_add(buffers_added, &(pool->available)); 256 atomic_add(buffers_added, &(pool->available));
256} 257}
257 258
258/* check if replenishing is needed. */ 259/* replenish routine */
259static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
260{
261 return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
262 (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
263 (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
264}
265
266/* kick the replenish tasklet if we need replenishing and it isn't already running */
267static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
268{
269 if(ibmveth_is_replenishing_needed(adapter) &&
270 (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
271 schedule_work(&adapter->replenish_task);
272 }
273}
274
275/* replenish tasklet routine */
276static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) 260static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
277{ 261{
262 int i;
263
278 adapter->replenish_task_cycles++; 264 adapter->replenish_task_cycles++;
279 265
280 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]); 266 for(i = 0; i < IbmVethNumBufferPools; i++)
281 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]); 267 if(adapter->rx_buff_pool[i].active)
282 ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]); 268 ibmveth_replenish_buffer_pool(adapter,
269 &adapter->rx_buff_pool[i]);
283 270
284 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8); 271 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
285
286 atomic_inc(&adapter->not_replenishing);
287
288 ibmveth_schedule_replenishing(adapter);
289} 272}
290 273
291/* empty and free ana buffer pool - also used to do cleanup in error paths */ 274/* empty and free ana buffer pool - also used to do cleanup in error paths */
@@ -293,10 +276,8 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
293{ 276{
294 int i; 277 int i;
295 278
296 if(pool->free_map) { 279 kfree(pool->free_map);
297 kfree(pool->free_map); 280 pool->free_map = NULL;
298 pool->free_map = NULL;
299 }
300 281
301 if(pool->skbuff && pool->dma_addr) { 282 if(pool->skbuff && pool->dma_addr) {
302 for(i = 0; i < pool->size; ++i) { 283 for(i = 0; i < pool->size; ++i) {
@@ -321,6 +302,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
321 kfree(pool->skbuff); 302 kfree(pool->skbuff);
322 pool->skbuff = NULL; 303 pool->skbuff = NULL;
323 } 304 }
305 pool->active = 0;
324} 306}
325 307
326/* remove a buffer from a pool */ 308/* remove a buffer from a pool */
@@ -379,6 +361,12 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
379 ibmveth_assert(pool < IbmVethNumBufferPools); 361 ibmveth_assert(pool < IbmVethNumBufferPools);
380 ibmveth_assert(index < adapter->rx_buff_pool[pool].size); 362 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
381 363
364 if(!adapter->rx_buff_pool[pool].active) {
365 ibmveth_rxq_harvest_buffer(adapter);
366 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
367 return;
368 }
369
382 desc.desc = 0; 370 desc.desc = 0;
383 desc.fields.valid = 1; 371 desc.fields.valid = 1;
384 desc.fields.length = adapter->rx_buff_pool[pool].buff_size; 372 desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
@@ -409,6 +397,8 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
409 397
410static void ibmveth_cleanup(struct ibmveth_adapter *adapter) 398static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
411{ 399{
400 int i;
401
412 if(adapter->buffer_list_addr != NULL) { 402 if(adapter->buffer_list_addr != NULL) {
413 if(!dma_mapping_error(adapter->buffer_list_dma)) { 403 if(!dma_mapping_error(adapter->buffer_list_dma)) {
414 dma_unmap_single(&adapter->vdev->dev, 404 dma_unmap_single(&adapter->vdev->dev,
@@ -443,26 +433,24 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
443 adapter->rx_queue.queue_addr = NULL; 433 adapter->rx_queue.queue_addr = NULL;
444 } 434 }
445 435
446 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]); 436 for(i = 0; i<IbmVethNumBufferPools; i++)
447 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]); 437 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
448 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
449} 438}
450 439
451static int ibmveth_open(struct net_device *netdev) 440static int ibmveth_open(struct net_device *netdev)
452{ 441{
453 struct ibmveth_adapter *adapter = netdev->priv; 442 struct ibmveth_adapter *adapter = netdev->priv;
454 u64 mac_address = 0; 443 u64 mac_address = 0;
455 int rxq_entries; 444 int rxq_entries = 1;
456 unsigned long lpar_rc; 445 unsigned long lpar_rc;
457 int rc; 446 int rc;
458 union ibmveth_buf_desc rxq_desc; 447 union ibmveth_buf_desc rxq_desc;
448 int i;
459 449
460 ibmveth_debug_printk("open starting\n"); 450 ibmveth_debug_printk("open starting\n");
461 451
462 rxq_entries = 452 for(i = 0; i<IbmVethNumBufferPools; i++)
463 adapter->rx_buff_pool[0].size + 453 rxq_entries += adapter->rx_buff_pool[i].size;
464 adapter->rx_buff_pool[1].size +
465 adapter->rx_buff_pool[2].size + 1;
466 454
467 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 455 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
468 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL); 456 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
@@ -502,14 +490,8 @@ static int ibmveth_open(struct net_device *netdev)
502 adapter->rx_queue.num_slots = rxq_entries; 490 adapter->rx_queue.num_slots = rxq_entries;
503 adapter->rx_queue.toggle = 1; 491 adapter->rx_queue.toggle = 1;
504 492
505 if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) || 493 /* call change_mtu to init the buffer pools based in initial mtu */
506 ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) || 494 ibmveth_change_mtu(netdev, netdev->mtu);
507 ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
508 {
509 ibmveth_error_printk("unable to allocate buffer pools\n");
510 ibmveth_cleanup(adapter);
511 return -ENOMEM;
512 }
513 495
514 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); 496 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
515 mac_address = mac_address >> 16; 497 mac_address = mac_address >> 16;
@@ -552,10 +534,10 @@ static int ibmveth_open(struct net_device *netdev)
552 return rc; 534 return rc;
553 } 535 }
554 536
555 netif_start_queue(netdev); 537 ibmveth_debug_printk("initial replenish cycle\n");
538 ibmveth_replenish_task(adapter);
556 539
557 ibmveth_debug_printk("scheduling initial replenish cycle\n"); 540 netif_start_queue(netdev);
558 ibmveth_schedule_replenishing(adapter);
559 541
560 ibmveth_debug_printk("open complete\n"); 542 ibmveth_debug_printk("open complete\n");
561 543
@@ -573,9 +555,6 @@ static int ibmveth_close(struct net_device *netdev)
573 555
574 free_irq(netdev->irq, netdev); 556 free_irq(netdev->irq, netdev);
575 557
576 cancel_delayed_work(&adapter->replenish_task);
577 flush_scheduled_work();
578
579 do { 558 do {
580 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); 559 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
581 } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy)); 560 } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@ -640,12 +619,18 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
640 unsigned long lpar_rc; 619 unsigned long lpar_rc;
641 int nfrags = 0, curfrag; 620 int nfrags = 0, curfrag;
642 unsigned long correlator; 621 unsigned long correlator;
622 unsigned long flags;
643 unsigned int retry_count; 623 unsigned int retry_count;
624 unsigned int tx_dropped = 0;
625 unsigned int tx_bytes = 0;
626 unsigned int tx_packets = 0;
627 unsigned int tx_send_failed = 0;
628 unsigned int tx_map_failed = 0;
629
644 630
645 if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) { 631 if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
646 adapter->stats.tx_dropped++; 632 tx_dropped++;
647 dev_kfree_skb(skb); 633 goto out;
648 return 0;
649 } 634 }
650 635
651 memset(&desc, 0, sizeof(desc)); 636 memset(&desc, 0, sizeof(desc));
@@ -664,10 +649,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
664 649
665 if(dma_mapping_error(desc[0].fields.address)) { 650 if(dma_mapping_error(desc[0].fields.address)) {
666 ibmveth_error_printk("tx: unable to map initial fragment\n"); 651 ibmveth_error_printk("tx: unable to map initial fragment\n");
667 adapter->tx_map_failed++; 652 tx_map_failed++;
668 adapter->stats.tx_dropped++; 653 tx_dropped++;
669 dev_kfree_skb(skb); 654 goto out;
670 return 0;
671 } 655 }
672 656
673 curfrag = nfrags; 657 curfrag = nfrags;
@@ -684,8 +668,8 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
684 668
685 if(dma_mapping_error(desc[curfrag+1].fields.address)) { 669 if(dma_mapping_error(desc[curfrag+1].fields.address)) {
686 ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag); 670 ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
687 adapter->tx_map_failed++; 671 tx_map_failed++;
688 adapter->stats.tx_dropped++; 672 tx_dropped++;
689 /* Free all the mappings we just created */ 673 /* Free all the mappings we just created */
690 while(curfrag < nfrags) { 674 while(curfrag < nfrags) {
691 dma_unmap_single(&adapter->vdev->dev, 675 dma_unmap_single(&adapter->vdev->dev,
@@ -694,8 +678,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
694 DMA_TO_DEVICE); 678 DMA_TO_DEVICE);
695 curfrag++; 679 curfrag++;
696 } 680 }
697 dev_kfree_skb(skb); 681 goto out;
698 return 0;
699 } 682 }
700 } 683 }
701 684
@@ -720,11 +703,12 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
720 ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i, 703 ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
721 desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address); 704 desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
722 } 705 }
723 adapter->tx_send_failed++; 706 tx_send_failed++;
724 adapter->stats.tx_dropped++; 707 tx_dropped++;
725 } else { 708 } else {
726 adapter->stats.tx_packets++; 709 tx_packets++;
727 adapter->stats.tx_bytes += skb->len; 710 tx_bytes += skb->len;
711 netdev->trans_start = jiffies;
728 } 712 }
729 713
730 do { 714 do {
@@ -733,6 +717,14 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
733 desc[nfrags].fields.length, DMA_TO_DEVICE); 717 desc[nfrags].fields.length, DMA_TO_DEVICE);
734 } while(--nfrags >= 0); 718 } while(--nfrags >= 0);
735 719
720out: spin_lock_irqsave(&adapter->stats_lock, flags);
721 adapter->stats.tx_dropped += tx_dropped;
722 adapter->stats.tx_bytes += tx_bytes;
723 adapter->stats.tx_packets += tx_packets;
724 adapter->tx_send_failed += tx_send_failed;
725 adapter->tx_map_failed += tx_map_failed;
726 spin_unlock_irqrestore(&adapter->stats_lock, flags);
727
736 dev_kfree_skb(skb); 728 dev_kfree_skb(skb);
737 return 0; 729 return 0;
738} 730}
@@ -776,13 +768,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
776 adapter->stats.rx_packets++; 768 adapter->stats.rx_packets++;
777 adapter->stats.rx_bytes += length; 769 adapter->stats.rx_bytes += length;
778 frames_processed++; 770 frames_processed++;
771 netdev->last_rx = jiffies;
779 } 772 }
780 } else { 773 } else {
781 more_work = 0; 774 more_work = 0;
782 } 775 }
783 } while(more_work && (frames_processed < max_frames_to_process)); 776 } while(more_work && (frames_processed < max_frames_to_process));
784 777
785 ibmveth_schedule_replenishing(adapter); 778 ibmveth_replenish_task(adapter);
786 779
787 if(more_work) { 780 if(more_work) {
788 /* more work to do - return that we are not done yet */ 781 /* more work to do - return that we are not done yet */
@@ -883,17 +876,54 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
883 876
884static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) 877static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
885{ 878{
886 if ((new_mtu < 68) || (new_mtu > (1<<20))) 879 struct ibmveth_adapter *adapter = dev->priv;
880 int i;
881 int prev_smaller = 1;
882
883 if ((new_mtu < 68) ||
884 (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
887 return -EINVAL; 885 return -EINVAL;
886
887 for(i = 0; i<IbmVethNumBufferPools; i++) {
888 int activate = 0;
889 if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
890 activate = 1;
891 prev_smaller= 1;
892 } else {
893 if (prev_smaller)
894 activate = 1;
895 prev_smaller= 0;
896 }
897
898 if (activate && !adapter->rx_buff_pool[i].active) {
899 struct ibmveth_buff_pool *pool =
900 &adapter->rx_buff_pool[i];
901 if(ibmveth_alloc_buffer_pool(pool)) {
902 ibmveth_error_printk("unable to alloc pool\n");
903 return -ENOMEM;
904 }
905 adapter->rx_buff_pool[i].active = 1;
906 } else if (!activate && adapter->rx_buff_pool[i].active) {
907 adapter->rx_buff_pool[i].active = 0;
908 h_free_logical_lan_buffer(adapter->vdev->unit_address,
909 (u64)pool_size[i]);
910 }
911
912 }
913
914 /* kick the interrupt handler so that the new buffer pools get
915 replenished or deallocated */
916 ibmveth_interrupt(dev->irq, dev, NULL);
917
888 dev->mtu = new_mtu; 918 dev->mtu = new_mtu;
889 return 0; 919 return 0;
890} 920}
891 921
892static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) 922static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
893{ 923{
894 int rc; 924 int rc, i;
895 struct net_device *netdev; 925 struct net_device *netdev;
896 struct ibmveth_adapter *adapter; 926 struct ibmveth_adapter *adapter = NULL;
897 927
898 unsigned char *mac_addr_p; 928 unsigned char *mac_addr_p;
899 unsigned int *mcastFilterSize_p; 929 unsigned int *mcastFilterSize_p;
@@ -960,23 +990,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
960 netdev->ethtool_ops = &netdev_ethtool_ops; 990 netdev->ethtool_ops = &netdev_ethtool_ops;
961 netdev->change_mtu = ibmveth_change_mtu; 991 netdev->change_mtu = ibmveth_change_mtu;
962 SET_NETDEV_DEV(netdev, &dev->dev); 992 SET_NETDEV_DEV(netdev, &dev->dev);
993 netdev->features |= NETIF_F_LLTX;
994 spin_lock_init(&adapter->stats_lock);
963 995
964 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); 996 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
965 997
966 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize); 998 for(i = 0; i<IbmVethNumBufferPools; i++)
967 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize); 999 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
968 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize); 1000 pool_count[i], pool_size[i]);
969 1001
970 ibmveth_debug_printk("adapter @ 0x%p\n", adapter); 1002 ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
971 1003
972 INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
973
974 adapter->buffer_list_dma = DMA_ERROR_CODE; 1004 adapter->buffer_list_dma = DMA_ERROR_CODE;
975 adapter->filter_list_dma = DMA_ERROR_CODE; 1005 adapter->filter_list_dma = DMA_ERROR_CODE;
976 adapter->rx_queue.queue_dma = DMA_ERROR_CODE; 1006 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
977 1007
978 atomic_set(&adapter->not_replenishing, 1);
979
980 ibmveth_debug_printk("registering netdev...\n"); 1008 ibmveth_debug_printk("registering netdev...\n");
981 1009
982 rc = register_netdev(netdev); 1010 rc = register_netdev(netdev);
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 51a470da9686..46919a814fca 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -49,6 +49,7 @@
49#define H_SEND_LOGICAL_LAN 0x120 49#define H_SEND_LOGICAL_LAN 0x120
50#define H_MULTICAST_CTRL 0x130 50#define H_MULTICAST_CTRL 0x130
51#define H_CHANGE_LOGICAL_LAN_MAC 0x14C 51#define H_CHANGE_LOGICAL_LAN_MAC 0x14C
52#define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
52 53
53/* hcall macros */ 54/* hcall macros */
54#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \ 55#define h_register_logical_lan(ua, buflst, rxq, fltlst, mac) \
@@ -69,13 +70,15 @@
69#define h_change_logical_lan_mac(ua, mac) \ 70#define h_change_logical_lan_mac(ua, mac) \
70 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac) 71 plpar_hcall_norets(H_CHANGE_LOGICAL_LAN_MAC, ua, mac)
71 72
72#define IbmVethNumBufferPools 3 73#define h_free_logical_lan_buffer(ua, bufsize) \
73#define IbmVethPool0DftSize (1024 * 2) 74 plpar_hcall_norets(H_FREE_LOGICAL_LAN_BUFFER, ua, bufsize)
74#define IbmVethPool1DftSize (1024 * 4) 75
75#define IbmVethPool2DftSize (1024 * 10) 76#define IbmVethNumBufferPools 5
76#define IbmVethPool0DftCnt 256 77#define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
77#define IbmVethPool1DftCnt 256 78
78#define IbmVethPool2DftCnt 256 79/* pool_size should be sorted */
80static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
81static int pool_count[] = { 256, 768, 256, 256, 256 };
79 82
80#define IBM_VETH_INVALID_MAP ((u16)0xffff) 83#define IBM_VETH_INVALID_MAP ((u16)0xffff)
81 84
@@ -90,6 +93,7 @@ struct ibmveth_buff_pool {
90 u16 *free_map; 93 u16 *free_map;
91 dma_addr_t *dma_addr; 94 dma_addr_t *dma_addr;
92 struct sk_buff **skbuff; 95 struct sk_buff **skbuff;
96 int active;
93}; 97};
94 98
95struct ibmveth_rx_q { 99struct ibmveth_rx_q {
@@ -114,10 +118,6 @@ struct ibmveth_adapter {
114 dma_addr_t filter_list_dma; 118 dma_addr_t filter_list_dma;
115 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools]; 119 struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
116 struct ibmveth_rx_q rx_queue; 120 struct ibmveth_rx_q rx_queue;
117 atomic_t not_replenishing;
118
119 /* helper tasks */
120 struct work_struct replenish_task;
121 121
122 /* adapter specific stats */ 122 /* adapter specific stats */
123 u64 replenish_task_cycles; 123 u64 replenish_task_cycles;
@@ -131,6 +131,7 @@ struct ibmveth_adapter {
131 u64 tx_linearize_failed; 131 u64 tx_linearize_failed;
132 u64 tx_map_failed; 132 u64 tx_map_failed;
133 u64 tx_send_failed; 133 u64 tx_send_failed;
134 spinlock_t stats_lock;
134}; 135};
135 136
136struct ibmveth_buf_desc_fields { 137struct ibmveth_buf_desc_fields {
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index ca5914091d3a..d54156f11e61 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -400,5 +400,15 @@ config VIA_FIR
400 To compile it as a module, choose M here: the module will be called 400 To compile it as a module, choose M here: the module will be called
401 via-ircc. 401 via-ircc.
402 402
403config PXA_FICP
404 tristate "Intel PXA2xx Internal FICP"
405 depends on ARCH_PXA && IRDA
406 help
407 Say Y or M here if you want to build support for the PXA2xx
408 built-in IRDA interface which can support both SIR and FIR.
409 This driver relies on platform specific helper routines so
410 available capabilities may vary from one PXA2xx target to
411 another.
412
403endmenu 413endmenu
404 414
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index 29a8bd812b21..e7a8b7f7f5dd 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_SMC_IRCC_FIR) += smsc-ircc2.o
18obj-$(CONFIG_ALI_FIR) += ali-ircc.o 18obj-$(CONFIG_ALI_FIR) += ali-ircc.o
19obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o 19obj-$(CONFIG_VLSI_FIR) += vlsi_ir.o
20obj-$(CONFIG_VIA_FIR) += via-ircc.o 20obj-$(CONFIG_VIA_FIR) += via-ircc.o
21obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
21# Old dongle drivers for old SIR drivers 22# Old dongle drivers for old SIR drivers
22obj-$(CONFIG_ESI_DONGLE_OLD) += esi.o 23obj-$(CONFIG_ESI_DONGLE_OLD) += esi.o
23obj-$(CONFIG_TEKRAM_DONGLE_OLD) += tekram.o 24obj-$(CONFIG_TEKRAM_DONGLE_OLD) += tekram.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 0a08c539c051..0282771b1cbb 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1695,11 +1695,9 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
1695 1695
1696freebufs: 1696freebufs:
1697 for (i = 0; i < TX_SLOTS; ++i) 1697 for (i = 0; i < TX_SLOTS; ++i)
1698 if (self->tx_bufs[i]) 1698 kfree (self->tx_bufs[i]);
1699 kfree (self->tx_bufs[i]);
1700 for (i = 0; i < RX_SLOTS; ++i) 1699 for (i = 0; i < RX_SLOTS; ++i)
1701 if (self->rx_bufs[i]) 1700 kfree (self->rx_bufs[i]);
1702 kfree (self->rx_bufs[i]);
1703 kfree(self->ringbuf); 1701 kfree(self->ringbuf);
1704 1702
1705freeregion: 1703freeregion:
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 6c766fdc51a6..c22c0517883c 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1168,10 +1168,8 @@ static inline void irda_usb_close(struct irda_usb_cb *self)
1168 unregister_netdev(self->netdev); 1168 unregister_netdev(self->netdev);
1169 1169
1170 /* Remove the speed buffer */ 1170 /* Remove the speed buffer */
1171 if (self->speed_buff != NULL) { 1171 kfree(self->speed_buff);
1172 kfree(self->speed_buff); 1172 self->speed_buff = NULL;
1173 self->speed_buff = NULL;
1174 }
1175} 1173}
1176 1174
1177/********************** USB CONFIG SUBROUTINES **********************/ 1175/********************** USB CONFIG SUBROUTINES **********************/
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 5971315f3fa0..3d016a498e1d 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -235,8 +235,7 @@ static int irport_close(struct irport_cb *self)
235 __FUNCTION__, self->io.sir_base); 235 __FUNCTION__, self->io.sir_base);
236 release_region(self->io.sir_base, self->io.sir_ext); 236 release_region(self->io.sir_base, self->io.sir_ext);
237 237
238 if (self->tx_buff.head) 238 kfree(self->tx_buff.head);
239 kfree(self->tx_buff.head);
240 239
241 if (self->rx_buff.skb) 240 if (self->rx_buff.skb)
242 kfree_skb(self->rx_buff.skb); 241 kfree_skb(self->rx_buff.skb);
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
new file mode 100644
index 000000000000..b886b07412a6
--- /dev/null
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -0,0 +1,865 @@
1/*
2 * linux/drivers/net/irda/pxaficp_ir.c
3 *
4 * Based on sa1100_ir.c by Russell King
5 *
6 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
13 *
14 */
15#include <linux/config.h>
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/netdevice.h>
21#include <linux/slab.h>
22#include <linux/rtnetlink.h>
23#include <linux/interrupt.h>
24#include <linux/dma-mapping.h>
25#include <linux/pm.h>
26
27#include <net/irda/irda.h>
28#include <net/irda/irmod.h>
29#include <net/irda/wrapper.h>
30#include <net/irda/irda_device.h>
31
32#include <asm/irq.h>
33#include <asm/dma.h>
34#include <asm/delay.h>
35#include <asm/hardware.h>
36#include <asm/arch/irda.h>
37#include <asm/arch/pxa-regs.h>
38
39#ifdef CONFIG_MACH_MAINSTONE
40#include <asm/arch/mainstone.h>
41#endif
42
43#define IrSR_RXPL_NEG_IS_ZERO (1<<4)
44#define IrSR_RXPL_POS_IS_ZERO 0x0
45#define IrSR_TXPL_NEG_IS_ZERO (1<<3)
46#define IrSR_TXPL_POS_IS_ZERO 0x0
47#define IrSR_XMODE_PULSE_1_6 (1<<2)
48#define IrSR_XMODE_PULSE_3_16 0x0
49#define IrSR_RCVEIR_IR_MODE (1<<1)
50#define IrSR_RCVEIR_UART_MODE 0x0
51#define IrSR_XMITIR_IR_MODE (1<<0)
52#define IrSR_XMITIR_UART_MODE 0x0
53
54#define IrSR_IR_RECEIVE_ON (\
55 IrSR_RXPL_NEG_IS_ZERO | \
56 IrSR_TXPL_POS_IS_ZERO | \
57 IrSR_XMODE_PULSE_3_16 | \
58 IrSR_RCVEIR_IR_MODE | \
59 IrSR_XMITIR_UART_MODE)
60
61#define IrSR_IR_TRANSMIT_ON (\
62 IrSR_RXPL_NEG_IS_ZERO | \
63 IrSR_TXPL_POS_IS_ZERO | \
64 IrSR_XMODE_PULSE_3_16 | \
65 IrSR_RCVEIR_UART_MODE | \
66 IrSR_XMITIR_IR_MODE)
67
68struct pxa_irda {
69 int speed;
70 int newspeed;
71 unsigned long last_oscr;
72
73 unsigned char *dma_rx_buff;
74 unsigned char *dma_tx_buff;
75 dma_addr_t dma_rx_buff_phy;
76 dma_addr_t dma_tx_buff_phy;
77 unsigned int dma_tx_buff_len;
78 int txdma;
79 int rxdma;
80
81 struct net_device_stats stats;
82 struct irlap_cb *irlap;
83 struct qos_info qos;
84
85 iobuff_t tx_buff;
86 iobuff_t rx_buff;
87
88 struct device *dev;
89 struct pxaficp_platform_data *pdata;
90};
91
92
93#define IS_FIR(si) ((si)->speed >= 4000000)
94#define IRDA_FRAME_SIZE_LIMIT 2047
95
96inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
97{
98 DCSR(si->rxdma) = DCSR_NODESC;
99 DSADR(si->rxdma) = __PREG(ICDR);
100 DTADR(si->rxdma) = si->dma_rx_buff_phy;
101 DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
102 DCSR(si->rxdma) |= DCSR_RUN;
103}
104
105inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
106{
107 DCSR(si->txdma) = DCSR_NODESC;
108 DSADR(si->txdma) = si->dma_tx_buff_phy;
109 DTADR(si->txdma) = __PREG(ICDR);
110 DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
111 DCSR(si->txdma) |= DCSR_RUN;
112}
113
114/*
115 * Set the IrDA communications speed.
116 */
117static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
118{
119 unsigned long flags;
120 unsigned int divisor;
121
122 switch (speed) {
123 case 9600: case 19200: case 38400:
124 case 57600: case 115200:
125
126 /* refer to PXA250/210 Developer's Manual 10-7 */
127 /* BaudRate = 14.7456 MHz / (16*Divisor) */
128 divisor = 14745600 / (16 * speed);
129
130 local_irq_save(flags);
131
132 if (IS_FIR(si)) {
133 /* stop RX DMA */
134 DCSR(si->rxdma) &= ~DCSR_RUN;
135 /* disable FICP */
136 ICCR0 = 0;
137 pxa_set_cken(CKEN13_FICP, 0);
138
139 /* set board transceiver to SIR mode */
140 si->pdata->transceiver_mode(si->dev, IR_SIRMODE);
141
142 /* configure GPIO46/47 */
143 pxa_gpio_mode(GPIO46_STRXD_MD);
144 pxa_gpio_mode(GPIO47_STTXD_MD);
145
146 /* enable the STUART clock */
147 pxa_set_cken(CKEN5_STUART, 1);
148 }
149
150 /* disable STUART first */
151 STIER = 0;
152
153 /* access DLL & DLH */
154 STLCR |= LCR_DLAB;
155 STDLL = divisor & 0xff;
156 STDLH = divisor >> 8;
157 STLCR &= ~LCR_DLAB;
158
159 si->speed = speed;
160 STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
161 STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
162
163 local_irq_restore(flags);
164 break;
165
166 case 4000000:
167 local_irq_save(flags);
168
169 /* disable STUART */
170 STIER = 0;
171 STISR = 0;
172 pxa_set_cken(CKEN5_STUART, 0);
173
174 /* disable FICP first */
175 ICCR0 = 0;
176
177 /* set board transceiver to FIR mode */
178 si->pdata->transceiver_mode(si->dev, IR_FIRMODE);
179
180 /* configure GPIO46/47 */
181 pxa_gpio_mode(GPIO46_ICPRXD_MD);
182 pxa_gpio_mode(GPIO47_ICPTXD_MD);
183
184 /* enable the FICP clock */
185 pxa_set_cken(CKEN13_FICP, 1);
186
187 si->speed = speed;
188 pxa_irda_fir_dma_rx_start(si);
189 ICCR0 = ICCR0_ITR | ICCR0_RXE;
190
191 local_irq_restore(flags);
192 break;
193
194 default:
195 return -EINVAL;
196 }
197
198 return 0;
199}
200
201/* SIR interrupt service routine. */
202static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id, struct pt_regs *regs)
203{
204 struct net_device *dev = dev_id;
205 struct pxa_irda *si = netdev_priv(dev);
206 int iir, lsr, data;
207
208 iir = STIIR;
209
210 switch (iir & 0x0F) {
211 case 0x06: /* Receiver Line Status */
212 lsr = STLSR;
213 while (lsr & LSR_FIFOE) {
214 data = STRBR;
215 if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
216 printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
217 si->stats.rx_errors++;
218 if (lsr & LSR_FE)
219 si->stats.rx_frame_errors++;
220 if (lsr & LSR_OE)
221 si->stats.rx_fifo_errors++;
222 } else {
223 si->stats.rx_bytes++;
224 async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
225 }
226 lsr = STLSR;
227 }
228 dev->last_rx = jiffies;
229 si->last_oscr = OSCR;
230 break;
231
232 case 0x04: /* Received Data Available */
233 /* forth through */
234
235 case 0x0C: /* Character Timeout Indication */
236 do {
237 si->stats.rx_bytes++;
238 async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
239 } while (STLSR & LSR_DR);
240 dev->last_rx = jiffies;
241 si->last_oscr = OSCR;
242 break;
243
244 case 0x02: /* Transmit FIFO Data Request */
245 while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
246 STTHR = *si->tx_buff.data++;
247 si->tx_buff.len -= 1;
248 }
249
250 if (si->tx_buff.len == 0) {
251 si->stats.tx_packets++;
252 si->stats.tx_bytes += si->tx_buff.data -
253 si->tx_buff.head;
254
255 /* We need to ensure that the transmitter has finished. */
256 while ((STLSR & LSR_TEMT) == 0)
257 cpu_relax();
258 si->last_oscr = OSCR;
259
260 /*
261 * Ok, we've finished transmitting. Now enable
262 * the receiver. Sometimes we get a receive IRQ
263 * immediately after a transmit...
264 */
265 if (si->newspeed) {
266 pxa_irda_set_speed(si, si->newspeed);
267 si->newspeed = 0;
268 } else {
269 /* enable IR Receiver, disable IR Transmitter */
270 STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
271 /* enable STUART and receive interrupts */
272 STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
273 }
274 /* I'm hungry! */
275 netif_wake_queue(dev);
276 }
277 break;
278 }
279
280 return IRQ_HANDLED;
281}
282
283/* FIR Receive DMA interrupt handler */
284static void pxa_irda_fir_dma_rx_irq(int channel, void *data, struct pt_regs *regs)
285{
286 int dcsr = DCSR(channel);
287
288 DCSR(channel) = dcsr & ~DCSR_RUN;
289
290 printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
291}
292
293/* FIR Transmit DMA interrupt handler */
294static void pxa_irda_fir_dma_tx_irq(int channel, void *data, struct pt_regs *regs)
295{
296 struct net_device *dev = data;
297 struct pxa_irda *si = netdev_priv(dev);
298 int dcsr;
299
300 dcsr = DCSR(channel);
301 DCSR(channel) = dcsr & ~DCSR_RUN;
302
303 if (dcsr & DCSR_ENDINTR) {
304 si->stats.tx_packets++;
305 si->stats.tx_bytes += si->dma_tx_buff_len;
306 } else {
307 si->stats.tx_errors++;
308 }
309
310 while (ICSR1 & ICSR1_TBY)
311 cpu_relax();
312 si->last_oscr = OSCR;
313
314 /*
315 * HACK: It looks like the TBY bit is dropped too soon.
316 * Without this delay things break.
317 */
318 udelay(120);
319
320 if (si->newspeed) {
321 pxa_irda_set_speed(si, si->newspeed);
322 si->newspeed = 0;
323 } else {
324 ICCR0 = 0;
325 pxa_irda_fir_dma_rx_start(si);
326 ICCR0 = ICCR0_ITR | ICCR0_RXE;
327 }
328 netif_wake_queue(dev);
329}
330
/* EIF(Error in FIFO/End in Frame) handler for FIR.
 * Drains the residue of the frame from the FICP receive FIFO by hand
 * (status byte must be read before its matching data byte), then hands
 * a complete frame up to IrLAP. */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev)
{
	unsigned int len, stat, data;

	/* Get the current data position. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data. */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			si->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				si->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				si->stats.rx_frame_errors++;
			}
		} else {
			/* NOTE(review): no bound check against the DMA buffer
			 * size — assumes len stays below IRDA_FRAME_SIZE_LIMIT;
			 * confirm against hardware frame-length limits. */
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb = alloc_skb(len+1,GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			si->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		memcpy(skb->data, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb->mac.raw = skb->data;
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		si->stats.rx_packets++;
		si->stats.rx_bytes += len;

		dev->last_rx = jiffies;
	}
}
389
/* FIR interrupt handler: stops RX DMA, accounts frame/abort errors,
 * drains any end-of-frame or FIFO-error residue, then re-arms reception. */
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0;

	/* stop RX DMA */
	DCSR(si->rxdma) &= ~DCSR_RUN;
	/* Timestamp for minimum-turnaround-time enforcement in hard_xmit. */
	si->last_oscr = OSCR;
	icsr0 = ICSR0;

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
			printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			si->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			si->stats.rx_errors++;
		}
		/* Write-to-clear the error bits we just handled. */
		ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An error in FIFO occured, or there is a end of frame */
		pxa_irda_fir_irq_eif(si, dev);
	}

	/* Re-arm: disable FICP, restart RX DMA, re-enable the receiver. */
	ICCR0 = 0;
	pxa_irda_fir_dma_rx_start(si);
	ICCR0 = ICCR0_ITR | ICCR0_RXE;

	return IRQ_HANDLED;
}
424
/* hard_xmit interface of irda device.
 * Queues one frame for transmission: SIR mode wraps the skb into the
 * STUART tx buffer and enables transmit interrupts; FIR mode copies it
 * into the DMA buffer and starts TX DMA.  The skb is always consumed;
 * returns 0. */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed? If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return 0;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		/* SIR: async-wrap the frame into the pre-allocated buffer. */
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode. */
		STIER = 0;
		STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;

		/* enable STUART and transmit interrupts */
		STIER = IER_UUE | IER_TIE;
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		memcpy(si->dma_tx_buff, skb->data, skb->len);

		/* Busy-wait out the minimum turnaround time since the last
		 * RX/TX completion (OSCR ticks / 4 ~ microseconds). */
		if (mtt)
			while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
				cpu_relax();

		/* stop RX DMA, disable FICP */
		DCSR(si->rxdma) &= ~DCSR_RUN;
		ICCR0 = 0;

		pxa_irda_fir_dma_tx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_TXE;
	}

	dev_kfree_skb(skb);
	dev->trans_start = jiffies;
	return 0;
}
485
486static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
487{
488 struct if_irda_req *rq = (struct if_irda_req *)ifreq;
489 struct pxa_irda *si = netdev_priv(dev);
490 int ret;
491
492 switch (cmd) {
493 case SIOCSBANDWIDTH:
494 ret = -EPERM;
495 if (capable(CAP_NET_ADMIN)) {
496 /*
497 * We are unable to set the speed if the
498 * device is not running.
499 */
500 if (netif_running(dev)) {
501 ret = pxa_irda_set_speed(si,
502 rq->ifr_baudrate);
503 } else {
504 printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
505 ret = 0;
506 }
507 }
508 break;
509
510 case SIOCSMEDIABUSY:
511 ret = -EPERM;
512 if (capable(CAP_NET_ADMIN)) {
513 irda_device_set_media_busy(dev, TRUE);
514 ret = 0;
515 }
516 break;
517
518 case SIOCGRECEIVING:
519 ret = 0;
520 rq->ifr_receiving = IS_FIR(si) ? 0
521 : si->rx_buff.state != OUTSIDE_FRAME;
522 break;
523
524 default:
525 ret = -EOPNOTSUPP;
526 break;
527 }
528
529 return ret;
530}
531
532static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
533{
534 struct pxa_irda *si = netdev_priv(dev);
535 return &si->stats;
536}
537
/*
 * Bring the STUART and FICP into a known state, program the DMA request
 * mapping, and fall back to 9600 baud SIR.  Called from pxa_irda_start()
 * and from the resume path.
 */
static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	STIER = 0;
	/* enable STUART interrupt to the processor */
	STMCR = MCR_OUT2;
	/* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
	STLCR = LCR_WLS0 | LCR_WLS1;
	/* enable FIFO, we use FIFO to improve performance */
	STFCR = FCR_TRFIFOE | FCR_ITL_32;

	/* disable FICP */
	ICCR0 = 0;
	/* configure FICP ICCR2 */
	ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;

	/* configure DMAC: map FICP RX/TX requests onto our channels */
	DRCMR17 = si->rxdma | DRCMR_MAPVLD;
	DRCMR18 = si->txdma | DRCMR_MAPVLD;

	/* force SIR reinitialization: pretend we were at FIR speed so
	 * pxa_irda_set_speed() takes the full mode-switch path */
	si->speed = 4000000;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}
564
/*
 * Power the port down: mask interrupts, stop DMA, gate the STUART and
 * FICP clocks, tear down the DMA request mapping and switch the board
 * transceiver off.  The register sequence runs with local IRQs disabled.
 */
static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and interrupt */
	STIER = 0;
	/* disable STUART SIR mode */
	STISR = 0;
	/* disable the STUART clock */
	pxa_set_cken(CKEN5_STUART, 0);

	/* disable DMA */
	DCSR(si->txdma) &= ~DCSR_RUN;
	DCSR(si->rxdma) &= ~DCSR_RUN;
	/* disable FICP */
	ICCR0 = 0;
	/* disable the FICP clock */
	pxa_set_cken(CKEN13_FICP, 0);

	/* unmap the FICP DMA requests */
	DRCMR17 = 0;
	DRCMR18 = 0;

	local_irq_restore(flags);

	/* power off board transceiver */
	si->pdata->transceiver_mode(si->dev, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}
596
/*
 * net_device open hook: acquire IRQs, DMA channels and coherent DMA
 * buffers, program the hardware, and open an IrLAP instance.  On any
 * failure the goto chain unwinds everything acquired so far.
 */
static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int err;

	si->speed = 9600;

	err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(IRQ_STUART);
	disable_irq(IRQ_ICP);

	err = -EBUSY;
	si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
	if (si->rxdma < 0)
		goto err_rx_dma;

	si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
	if (si->txdma < 0)
		goto err_tx_dma;

	err = -ENOMEM;
	/* Coherent buffers for FIR DMA, one full IrDA frame each. */
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL );
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL );
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(IRQ_STUART);
	enable_irq(IRQ_ICP);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	pxa_free_dma(si->txdma);
err_tx_dma:
	pxa_free_dma(si->rxdma);
err_rx_dma:
	free_irq(IRQ_ICP, dev);
err_irq2:
	free_irq(IRQ_STUART, dev);
err_irq1:

	return err;
}
677
678static int pxa_irda_stop(struct net_device *dev)
679{
680 struct pxa_irda *si = netdev_priv(dev);
681
682 netif_stop_queue(dev);
683
684 pxa_irda_shutdown(si);
685
686 /* Stop IrLAP */
687 if (si->irlap) {
688 irlap_close(si->irlap);
689 si->irlap = NULL;
690 }
691
692 free_irq(IRQ_STUART, dev);
693 free_irq(IRQ_ICP, dev);
694
695 pxa_free_dma(si->rxdma);
696 pxa_free_dma(si->txdma);
697
698 if (si->dma_rx_buff)
699 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
700 if (si->dma_tx_buff)
701 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
702
703 printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
704 return 0;
705}
706
707static int pxa_irda_suspend(struct device *_dev, pm_message_t state)
708{
709 struct net_device *dev = dev_get_drvdata(_dev);
710 struct pxa_irda *si;
711
712 if (dev && netif_running(dev)) {
713 si = netdev_priv(dev);
714 netif_device_detach(dev);
715 pxa_irda_shutdown(si);
716 }
717
718 return 0;
719}
720
/* Bus resume hook: reinitialise the hardware, reattach the interface
 * and restart its transmit queue if it was running.  Always succeeds. */
static int pxa_irda_resume(struct device *_dev)
{
	struct net_device *ndev = dev_get_drvdata(_dev);

	if (ndev && netif_running(ndev)) {
		pxa_irda_startup(netdev_priv(ndev));
		netif_device_attach(ndev);
		netif_wake_queue(ndev);
	}

	return 0;
}
735
736
737static int pxa_irda_init_iobuf(iobuff_t *io, int size)
738{
739 io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
740 if (io->head != NULL) {
741 io->truesize = size;
742 io->in_frame = FALSE;
743 io->state = OUTSIDE_FRAME;
744 io->data = io->head;
745 }
746 return io->head ? 0 : -ENOMEM;
747}
748
749static int pxa_irda_probe(struct device *_dev)
750{
751 struct platform_device *pdev = to_platform_device(_dev);
752 struct net_device *dev;
753 struct pxa_irda *si;
754 unsigned int baudrate_mask;
755 int err;
756
757 if (!pdev->dev.platform_data)
758 return -ENODEV;
759
760 err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
761 if (err)
762 goto err_mem_1;
763
764 err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
765 if (err)
766 goto err_mem_2;
767
768 dev = alloc_irdadev(sizeof(struct pxa_irda));
769 if (!dev)
770 goto err_mem_3;
771
772 si = netdev_priv(dev);
773 si->dev = &pdev->dev;
774 si->pdata = pdev->dev.platform_data;
775
776 /*
777 * Initialise the SIR buffers
778 */
779 err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
780 if (err)
781 goto err_mem_4;
782 err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
783 if (err)
784 goto err_mem_5;
785
786 dev->hard_start_xmit = pxa_irda_hard_xmit;
787 dev->open = pxa_irda_start;
788 dev->stop = pxa_irda_stop;
789 dev->do_ioctl = pxa_irda_ioctl;
790 dev->get_stats = pxa_irda_stats;
791
792 irda_init_max_qos_capabilies(&si->qos);
793
794 baudrate_mask = 0;
795 if (si->pdata->transceiver_cap & IR_SIRMODE)
796 baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
797 if (si->pdata->transceiver_cap & IR_FIRMODE)
798 baudrate_mask |= IR_4000000 << 8;
799
800 si->qos.baud_rate.bits &= baudrate_mask;
801 si->qos.min_turn_time.bits = 7; /* 1ms or more */
802
803 irda_qos_bits_to_value(&si->qos);
804
805 err = register_netdev(dev);
806
807 if (err == 0)
808 dev_set_drvdata(&pdev->dev, dev);
809
810 if (err) {
811 kfree(si->tx_buff.head);
812err_mem_5:
813 kfree(si->rx_buff.head);
814err_mem_4:
815 free_netdev(dev);
816err_mem_3:
817 release_mem_region(__PREG(FICP), 0x1c);
818err_mem_2:
819 release_mem_region(__PREG(STUART), 0x24);
820 }
821err_mem_1:
822 return err;
823}
824
825static int pxa_irda_remove(struct device *_dev)
826{
827 struct net_device *dev = dev_get_drvdata(_dev);
828
829 if (dev) {
830 struct pxa_irda *si = netdev_priv(dev);
831 unregister_netdev(dev);
832 kfree(si->tx_buff.head);
833 kfree(si->rx_buff.head);
834 free_netdev(dev);
835 }
836
837 release_mem_region(__PREG(STUART), 0x24);
838 release_mem_region(__PREG(FICP), 0x1c);
839
840 return 0;
841}
842
/* Platform driver glue: probe/remove plus the power-management hooks. */
static struct device_driver pxa_ir_driver = {
	.name		= "pxa2xx-ir",
	.bus		= &platform_bus_type,
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};
851
/* Module entry point: register the platform driver. */
static int __init pxa_irda_init(void)
{
	return driver_register(&pxa_ir_driver);
}

/* Module exit point: unregister the platform driver. */
static void __exit pxa_irda_exit(void)
{
	driver_unregister(&pxa_ir_driver);
}

module_init(pxa_irda_init);
module_exit(pxa_irda_exit);

MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 8d34ac60d906..06883309916d 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -291,12 +291,12 @@ static void sa1100_irda_shutdown(struct sa1100_irda *si)
291/* 291/*
292 * Suspend the IrDA interface. 292 * Suspend the IrDA interface.
293 */ 293 */
294static int sa1100_irda_suspend(struct device *_dev, pm_message_t state, u32 level) 294static int sa1100_irda_suspend(struct device *_dev, pm_message_t state)
295{ 295{
296 struct net_device *dev = dev_get_drvdata(_dev); 296 struct net_device *dev = dev_get_drvdata(_dev);
297 struct sa1100_irda *si; 297 struct sa1100_irda *si;
298 298
299 if (!dev || level != SUSPEND_DISABLE) 299 if (!dev)
300 return 0; 300 return 0;
301 301
302 si = dev->priv; 302 si = dev->priv;
@@ -316,12 +316,12 @@ static int sa1100_irda_suspend(struct device *_dev, pm_message_t state, u32 leve
316/* 316/*
317 * Resume the IrDA interface. 317 * Resume the IrDA interface.
318 */ 318 */
319static int sa1100_irda_resume(struct device *_dev, u32 level) 319static int sa1100_irda_resume(struct device *_dev)
320{ 320{
321 struct net_device *dev = dev_get_drvdata(_dev); 321 struct net_device *dev = dev_get_drvdata(_dev);
322 struct sa1100_irda *si; 322 struct sa1100_irda *si;
323 323
324 if (!dev || level != RESUME_ENABLE) 324 if (!dev)
325 return 0; 325 return 0;
326 326
327 si = dev->priv; 327 si = dev->priv;
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index efc5a8870565..df22b8b532e7 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -490,8 +490,7 @@ static void sirdev_free_buffers(struct sir_dev *dev)
490{ 490{
491 if (dev->rx_buff.skb) 491 if (dev->rx_buff.skb)
492 kfree_skb(dev->rx_buff.skb); 492 kfree_skb(dev->rx_buff.skb);
493 if (dev->tx_buff.head) 493 kfree(dev->tx_buff.head);
494 kfree(dev->tx_buff.head);
495 dev->rx_buff.head = dev->tx_buff.head = NULL; 494 dev->rx_buff.head = dev->tx_buff.head = NULL;
496 dev->rx_buff.skb = NULL; 495 dev->rx_buff.skb = NULL;
497} 496}
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index dd89bda1f131..140b7cdb1f7e 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -213,8 +213,8 @@ static int smsc_ircc_probe_transceiver_smsc_ircc_atc(int fir_base);
213 213
214/* Power Management */ 214/* Power Management */
215 215
216static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level); 216static int smsc_ircc_suspend(struct device *dev, pm_message_t state);
217static int smsc_ircc_resume(struct device *dev, u32 level); 217static int smsc_ircc_resume(struct device *dev);
218 218
219static struct device_driver smsc_ircc_driver = { 219static struct device_driver smsc_ircc_driver = {
220 .name = SMSC_IRCC2_DRIVER_NAME, 220 .name = SMSC_IRCC2_DRIVER_NAME,
@@ -638,21 +638,14 @@ static void smsc_ircc_setup_qos(struct smsc_ircc_cb *self)
638 */ 638 */
639static void smsc_ircc_init_chip(struct smsc_ircc_cb *self) 639static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
640{ 640{
641 int iobase, ir_mode, ctrl, fast; 641 int iobase = self->io.fir_base;
642
643 IRDA_ASSERT(self != NULL, return;);
644
645 iobase = self->io.fir_base;
646 ir_mode = IRCC_CFGA_IRDA_SIR_A;
647 ctrl = 0;
648 fast = 0;
649 642
650 register_bank(iobase, 0); 643 register_bank(iobase, 0);
651 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER); 644 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
652 outb(0x00, iobase + IRCC_MASTER); 645 outb(0x00, iobase + IRCC_MASTER);
653 646
654 register_bank(iobase, 1); 647 register_bank(iobase, 1);
655 outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | ir_mode), 648 outb(((inb(iobase + IRCC_SCE_CFGA) & 0x87) | IRCC_CFGA_IRDA_SIR_A),
656 iobase + IRCC_SCE_CFGA); 649 iobase + IRCC_SCE_CFGA);
657 650
658#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */ 651#ifdef smsc_669 /* Uses pin 88/89 for Rx/Tx */
@@ -666,10 +659,10 @@ static void smsc_ircc_init_chip(struct smsc_ircc_cb *self)
666 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD); 659 outb(SMSC_IRCC2_FIFO_THRESHOLD, iobase + IRCC_FIFO_THRESHOLD);
667 660
668 register_bank(iobase, 4); 661 register_bank(iobase, 4);
669 outb((inb(iobase + IRCC_CONTROL) & 0x30) | ctrl, iobase + IRCC_CONTROL); 662 outb((inb(iobase + IRCC_CONTROL) & 0x30), iobase + IRCC_CONTROL);
670 663
671 register_bank(iobase, 0); 664 register_bank(iobase, 0);
672 outb(fast, iobase + IRCC_LCR_A); 665 outb(0, iobase + IRCC_LCR_A);
673 666
674 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED); 667 smsc_ircc_set_sir_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
675 668
@@ -1556,6 +1549,46 @@ static int ircc_is_receiving(struct smsc_ircc_cb *self)
1556} 1549}
1557#endif /* unused */ 1550#endif /* unused */
1558 1551
1552static int smsc_ircc_request_irq(struct smsc_ircc_cb *self)
1553{
1554 int error;
1555
1556 error = request_irq(self->io.irq, smsc_ircc_interrupt, 0,
1557 self->netdev->name, self->netdev);
1558 if (error)
1559 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d, err=%d\n",
1560 __FUNCTION__, self->io.irq, error);
1561
1562 return error;
1563}
1564
1565static void smsc_ircc_start_interrupts(struct smsc_ircc_cb *self)
1566{
1567 unsigned long flags;
1568
1569 spin_lock_irqsave(&self->lock, flags);
1570
1571 self->io.speed = 0;
1572 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1573
1574 spin_unlock_irqrestore(&self->lock, flags);
1575}
1576
1577static void smsc_ircc_stop_interrupts(struct smsc_ircc_cb *self)
1578{
1579 int iobase = self->io.fir_base;
1580 unsigned long flags;
1581
1582 spin_lock_irqsave(&self->lock, flags);
1583
1584 register_bank(iobase, 0);
1585 outb(0, iobase + IRCC_IER);
1586 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
1587 outb(0x00, iobase + IRCC_MASTER);
1588
1589 spin_unlock_irqrestore(&self->lock, flags);
1590}
1591
1559 1592
1560/* 1593/*
1561 * Function smsc_ircc_net_open (dev) 1594 * Function smsc_ircc_net_open (dev)
@@ -1567,7 +1600,6 @@ static int smsc_ircc_net_open(struct net_device *dev)
1567{ 1600{
1568 struct smsc_ircc_cb *self; 1601 struct smsc_ircc_cb *self;
1569 char hwname[16]; 1602 char hwname[16];
1570 unsigned long flags;
1571 1603
1572 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1604 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1573 1605
@@ -1575,6 +1607,11 @@ static int smsc_ircc_net_open(struct net_device *dev)
1575 self = netdev_priv(dev); 1607 self = netdev_priv(dev);
1576 IRDA_ASSERT(self != NULL, return 0;); 1608 IRDA_ASSERT(self != NULL, return 0;);
1577 1609
1610 if (self->io.suspended) {
1611 IRDA_DEBUG(0, "%s(), device is suspended\n", __FUNCTION__);
1612 return -EAGAIN;
1613 }
1614
1578 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name, 1615 if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
1579 (void *) dev)) { 1616 (void *) dev)) {
1580 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n", 1617 IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
@@ -1582,11 +1619,7 @@ static int smsc_ircc_net_open(struct net_device *dev)
1582 return -EAGAIN; 1619 return -EAGAIN;
1583 } 1620 }
1584 1621
1585 spin_lock_irqsave(&self->lock, flags); 1622 smsc_ircc_start_interrupts(self);
1586 /*smsc_ircc_sir_start(self);*/
1587 self->io.speed = 0;
1588 smsc_ircc_change_speed(self, SMSC_IRCC2_C_IRDA_FALLBACK_SPEED);
1589 spin_unlock_irqrestore(&self->lock, flags);
1590 1623
1591 /* Give self a hardware name */ 1624 /* Give self a hardware name */
1592 /* It would be cool to offer the chip revision here - Jean II */ 1625 /* It would be cool to offer the chip revision here - Jean II */
@@ -1639,37 +1672,63 @@ static int smsc_ircc_net_close(struct net_device *dev)
1639 irlap_close(self->irlap); 1672 irlap_close(self->irlap);
1640 self->irlap = NULL; 1673 self->irlap = NULL;
1641 1674
1642 free_irq(self->io.irq, dev); 1675 smsc_ircc_stop_interrupts(self);
1676
1677 /* if we are called from smsc_ircc_resume we don't have IRQ reserved */
1678 if (!self->io.suspended)
1679 free_irq(self->io.irq, dev);
1680
1643 disable_dma(self->io.dma); 1681 disable_dma(self->io.dma);
1644 free_dma(self->io.dma); 1682 free_dma(self->io.dma);
1645 1683
1646 return 0; 1684 return 0;
1647} 1685}
1648 1686
1649static int smsc_ircc_suspend(struct device *dev, pm_message_t state, u32 level) 1687static int smsc_ircc_suspend(struct device *dev, pm_message_t state)
1650{ 1688{
1651 struct smsc_ircc_cb *self = dev_get_drvdata(dev); 1689 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1652 1690
1653 IRDA_MESSAGE("%s, Suspending\n", driver_name); 1691 if (!self->io.suspended) {
1692 IRDA_DEBUG(1, "%s, Suspending\n", driver_name);
1654 1693
1655 if (level == SUSPEND_DISABLE && !self->io.suspended) { 1694 rtnl_lock();
1656 smsc_ircc_net_close(self->netdev); 1695 if (netif_running(self->netdev)) {
1696 netif_device_detach(self->netdev);
1697 smsc_ircc_stop_interrupts(self);
1698 free_irq(self->io.irq, self->netdev);
1699 disable_dma(self->io.dma);
1700 }
1657 self->io.suspended = 1; 1701 self->io.suspended = 1;
1702 rtnl_unlock();
1658 } 1703 }
1659 1704
1660 return 0; 1705 return 0;
1661} 1706}
1662 1707
1663static int smsc_ircc_resume(struct device *dev, u32 level) 1708static int smsc_ircc_resume(struct device *dev)
1664{ 1709{
1665 struct smsc_ircc_cb *self = dev_get_drvdata(dev); 1710 struct smsc_ircc_cb *self = dev_get_drvdata(dev);
1666 1711
1667 if (level == RESUME_ENABLE && self->io.suspended) { 1712 if (self->io.suspended) {
1668 1713 IRDA_DEBUG(1, "%s, Waking up\n", driver_name);
1669 smsc_ircc_net_open(self->netdev); 1714
1715 rtnl_lock();
1716 smsc_ircc_init_chip(self);
1717 if (netif_running(self->netdev)) {
1718 if (smsc_ircc_request_irq(self)) {
1719 /*
1720 * Don't fail resume process, just kill this
1721 * network interface
1722 */
1723 unregister_netdevice(self->netdev);
1724 } else {
1725 enable_dma(self->io.dma);
1726 smsc_ircc_start_interrupts(self);
1727 netif_device_attach(self->netdev);
1728 }
1729 }
1670 self->io.suspended = 0; 1730 self->io.suspended = 0;
1671 1731 rtnl_unlock();
1672 IRDA_MESSAGE("%s, Waking up\n", driver_name);
1673 } 1732 }
1674 return 0; 1733 return 0;
1675} 1734}
@@ -1682,9 +1741,6 @@ static int smsc_ircc_resume(struct device *dev, u32 level)
1682 */ 1741 */
1683static int __exit smsc_ircc_close(struct smsc_ircc_cb *self) 1742static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1684{ 1743{
1685 int iobase;
1686 unsigned long flags;
1687
1688 IRDA_DEBUG(1, "%s\n", __FUNCTION__); 1744 IRDA_DEBUG(1, "%s\n", __FUNCTION__);
1689 1745
1690 IRDA_ASSERT(self != NULL, return -1;); 1746 IRDA_ASSERT(self != NULL, return -1;);
@@ -1694,22 +1750,7 @@ static int __exit smsc_ircc_close(struct smsc_ircc_cb *self)
1694 /* Remove netdevice */ 1750 /* Remove netdevice */
1695 unregister_netdev(self->netdev); 1751 unregister_netdev(self->netdev);
1696 1752
1697 /* Make sure the irq handler is not exectuting */ 1753 smsc_ircc_stop_interrupts(self);
1698 spin_lock_irqsave(&self->lock, flags);
1699
1700 /* Stop interrupts */
1701 iobase = self->io.fir_base;
1702 register_bank(iobase, 0);
1703 outb(0, iobase + IRCC_IER);
1704 outb(IRCC_MASTER_RESET, iobase + IRCC_MASTER);
1705 outb(0x00, iobase + IRCC_MASTER);
1706#if 0
1707 /* Reset to SIR mode */
1708 register_bank(iobase, 1);
1709 outb(IRCC_CFGA_IRDA_SIR_A|IRCC_CFGA_TX_POLARITY, iobase + IRCC_SCE_CFGA);
1710 outb(IRCC_CFGB_IR, iobase + IRCC_SCE_CFGB);
1711#endif
1712 spin_unlock_irqrestore(&self->lock, flags);
1713 1754
1714 /* Release the PORTS that this driver is using */ 1755 /* Release the PORTS that this driver is using */
1715 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__, 1756 IRDA_DEBUG(0, "%s(), releasing 0x%03x\n", __FUNCTION__,
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 15f207323d97..3961a754e920 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -678,10 +678,9 @@ static void turnaround_delay(const struct stir_cb *stir, long us)
678 return; 678 return;
679 679
680 ticks = us / (1000000 / HZ); 680 ticks = us / (1000000 / HZ);
681 if (ticks > 0) { 681 if (ticks > 0)
682 current->state = TASK_INTERRUPTIBLE; 682 schedule_timeout_interruptible(1 + ticks);
683 schedule_timeout(1 + ticks); 683 else
684 } else
685 udelay(us); 684 udelay(us);
686} 685}
687 686
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 651c5a6578fd..a9f49f058cfb 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -473,8 +473,7 @@ static int vlsi_free_ring(struct vlsi_ring *r)
473 rd_set_addr_status(rd, 0, 0); 473 rd_set_addr_status(rd, 0, 0);
474 if (busaddr) 474 if (busaddr)
475 pci_unmap_single(r->pdev, busaddr, r->len, r->dir); 475 pci_unmap_single(r->pdev, busaddr, r->len, r->dir);
476 if (rd->buf) 476 kfree(rd->buf);
477 kfree(rd->buf);
478 } 477 }
479 kfree(r); 478 kfree(r);
480 return 0; 479 return 0;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 9d026ed77ddd..04e47189d830 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -645,11 +645,10 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data)
645 645
646 mod_timer(&adapter->blink_timer, jiffies); 646 mod_timer(&adapter->blink_timer, jiffies);
647 647
648 set_current_state(TASK_INTERRUPTIBLE); 648 if (data)
649 if(data) 649 schedule_timeout_interruptible(data * HZ);
650 schedule_timeout(data * HZ);
651 else 650 else
652 schedule_timeout(MAX_SCHEDULE_TIMEOUT); 651 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
653 652
654 del_timer_sync(&adapter->blink_timer); 653 del_timer_sync(&adapter->blink_timer);
655 ixgb_led_off(&adapter->hw); 654 ixgb_led_off(&adapter->hw);
@@ -723,6 +722,7 @@ struct ethtool_ops ixgb_ethtool_ops = {
723 .phys_id = ixgb_phys_id, 722 .phys_id = ixgb_phys_id,
724 .get_stats_count = ixgb_get_stats_count, 723 .get_stats_count = ixgb_get_stats_count,
725 .get_ethtool_stats = ixgb_get_ethtool_stats, 724 .get_ethtool_stats = ixgb_get_ethtool_stats,
725 .get_perm_addr = ethtool_op_get_perm_addr,
726}; 726};
727 727
728void ixgb_set_ethtool_ops(struct net_device *netdev) 728void ixgb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 89d6d69be382..176680cb153e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -460,8 +460,9 @@ ixgb_probe(struct pci_dev *pdev,
460 } 460 }
461 461
462 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr); 462 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
463 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
463 464
464 if(!is_valid_ether_addr(netdev->dev_addr)) { 465 if(!is_valid_ether_addr(netdev->perm_addr)) {
465 err = -EIO; 466 err = -EIO;
466 goto err_eeprom; 467 goto err_eeprom;
467 } 468 }
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index b4929beb33b2..1d75ca0bb939 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -298,7 +298,7 @@ enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_
298static unsigned char lance_need_isa_bounce_buffers = 1; 298static unsigned char lance_need_isa_bounce_buffers = 1;
299 299
300static int lance_open(struct net_device *dev); 300static int lance_open(struct net_device *dev);
301static void lance_init_ring(struct net_device *dev, int mode); 301static void lance_init_ring(struct net_device *dev, gfp_t mode);
302static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev); 302static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
303static int lance_rx(struct net_device *dev); 303static int lance_rx(struct net_device *dev);
304static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs); 304static irqreturn_t lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
@@ -846,7 +846,7 @@ lance_purge_ring(struct net_device *dev)
846 846
847/* Initialize the LANCE Rx and Tx rings. */ 847/* Initialize the LANCE Rx and Tx rings. */
848static void 848static void
849lance_init_ring(struct net_device *dev, int gfp) 849lance_init_ring(struct net_device *dev, gfp_t gfp)
850{ 850{
851 struct lance_private *lp = dev->priv; 851 struct lance_private *lp = dev->priv;
852 int i; 852 int i;
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index 41bad07ac1ac..f7b7238d8352 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -415,6 +415,10 @@ static int rx_ring_size = RX_RING_SIZE;
415static int ticks_limit = 100; 415static int ticks_limit = 100;
416static int max_cmd_backlog = TX_RING_SIZE-1; 416static int max_cmd_backlog = TX_RING_SIZE-1;
417 417
418#ifdef CONFIG_NET_POLL_CONTROLLER
419static void i596_poll_controller(struct net_device *dev);
420#endif
421
418 422
419static inline void CA(struct net_device *dev) 423static inline void CA(struct net_device *dev)
420{ 424{
@@ -636,11 +640,11 @@ static int init_i596_mem(struct net_device *dev)
636 640
637 disable_irq(dev->irq); /* disable IRQs from LAN */ 641 disable_irq(dev->irq); /* disable IRQs from LAN */
638 DEB(DEB_INIT, 642 DEB(DEB_INIT,
639 printk("RESET 82596 port: %p (with IRQ %d disabled)\n", 643 printk("RESET 82596 port: %lx (with IRQ %d disabled)\n",
640 (void*)(dev->base_addr + PA_I82596_RESET), 644 (dev->base_addr + PA_I82596_RESET),
641 dev->irq)); 645 dev->irq));
642 646
643 gsc_writel(0, (void*)(dev->base_addr + PA_I82596_RESET)); /* Hard Reset */ 647 gsc_writel(0, (dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
644 udelay(100); /* Wait 100us - seems to help */ 648 udelay(100); /* Wait 100us - seems to help */
645 649
646 /* change the scp address */ 650 /* change the scp address */
@@ -1209,6 +1213,9 @@ static int __devinit i82596_probe(struct net_device *dev,
1209 dev->set_multicast_list = set_multicast_list; 1213 dev->set_multicast_list = set_multicast_list;
1210 dev->tx_timeout = i596_tx_timeout; 1214 dev->tx_timeout = i596_tx_timeout;
1211 dev->watchdog_timeo = TX_TIMEOUT; 1215 dev->watchdog_timeo = TX_TIMEOUT;
1216#ifdef CONFIG_NET_POLL_CONTROLLER
1217 dev->poll_controller = i596_poll_controller;
1218#endif
1212 1219
1213 dev->priv = (void *)(dev->mem_start); 1220 dev->priv = (void *)(dev->mem_start);
1214 1221
@@ -1242,6 +1249,14 @@ static int __devinit i82596_probe(struct net_device *dev,
1242 return 0; 1249 return 0;
1243} 1250}
1244 1251
1252#ifdef CONFIG_NET_POLL_CONTROLLER
1253static void i596_poll_controller(struct net_device *dev)
1254{
1255 disable_irq(dev->irq);
1256 i596_interrupt(dev->irq, dev, NULL);
1257 enable_irq(dev->irq);
1258}
1259#endif
1245 1260
1246static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs) 1261static irqreturn_t i596_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1247{ 1262{
@@ -1528,17 +1543,18 @@ lan_init_chip(struct parisc_device *dev)
1528 1543
1529 if (!dev->irq) { 1544 if (!dev->irq) {
1530 printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", 1545 printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
1531 __FILE__, dev->hpa); 1546 __FILE__, dev->hpa.start);
1532 return -ENODEV; 1547 return -ENODEV;
1533 } 1548 }
1534 1549
1535 printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa, dev->irq); 1550 printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start,
1551 dev->irq);
1536 1552
1537 netdevice = alloc_etherdev(0); 1553 netdevice = alloc_etherdev(0);
1538 if (!netdevice) 1554 if (!netdevice)
1539 return -ENOMEM; 1555 return -ENOMEM;
1540 1556
1541 netdevice->base_addr = dev->hpa; 1557 netdevice->base_addr = dev->hpa.start;
1542 netdevice->irq = dev->irq; 1558 netdevice->irq = dev->irq;
1543 1559
1544 retval = i82596_probe(netdevice, &dev->dev); 1560 retval = i82596_probe(netdevice, &dev->dev);
@@ -1566,7 +1582,7 @@ static struct parisc_device_id lan_tbl[] = {
1566MODULE_DEVICE_TABLE(parisc, lan_tbl); 1582MODULE_DEVICE_TABLE(parisc, lan_tbl);
1567 1583
1568static struct parisc_driver lan_driver = { 1584static struct parisc_driver lan_driver = {
1569 .name = "Apricot", 1585 .name = "lasi_82596",
1570 .id_table = lan_tbl, 1586 .id_table = lan_tbl,
1571 .probe = lan_init_chip, 1587 .probe = lan_init_chip,
1572}; 1588};
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 27f0d8ac4c40..309d254842cf 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -298,7 +298,7 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
298 return 0; 298 return 0;
299unmap: 299unmap:
300 if (ei_status.reg0) 300 if (ei_status.reg0)
301 iounmap((void *)dev->mem_start); 301 iounmap(ei_status.mem);
302cleanup: 302cleanup:
303 free_irq(dev->irq, dev); 303 free_irq(dev->irq, dev);
304 return ret; 304 return ret;
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index f2fc1f26cd4b..2a5add257b8f 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -1036,10 +1036,8 @@ static void __exit mace_cleanup(void)
1036{ 1036{
1037 macio_unregister_driver(&mace_driver); 1037 macio_unregister_driver(&mace_driver);
1038 1038
1039 if (dummy_buf) { 1039 kfree(dummy_buf);
1040 kfree(dummy_buf); 1040 dummy_buf = NULL;
1041 dummy_buf = NULL;
1042 }
1043} 1041}
1044 1042
1045MODULE_AUTHOR("Paul Mackerras"); 1043MODULE_AUTHOR("Paul Mackerras");
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index c33cb3dc942b..e42aa797f08b 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -207,6 +207,20 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
207 return 0; 207 return 0;
208} 208}
209 209
210int mii_check_gmii_support(struct mii_if_info *mii)
211{
212 int reg;
213
214 reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
215 if (reg & BMSR_ESTATEN) {
216 reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS);
217 if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF))
218 return 1;
219 }
220
221 return 0;
222}
223
210int mii_link_ok (struct mii_if_info *mii) 224int mii_link_ok (struct mii_if_info *mii)
211{ 225{
212 /* first, a dummy read, needed to latch some MII phys */ 226 /* first, a dummy read, needed to latch some MII phys */
@@ -394,5 +408,6 @@ EXPORT_SYMBOL(mii_ethtool_gset);
394EXPORT_SYMBOL(mii_ethtool_sset); 408EXPORT_SYMBOL(mii_ethtool_sset);
395EXPORT_SYMBOL(mii_check_link); 409EXPORT_SYMBOL(mii_check_link);
396EXPORT_SYMBOL(mii_check_media); 410EXPORT_SYMBOL(mii_check_media);
411EXPORT_SYMBOL(mii_check_gmii_support);
397EXPORT_SYMBOL(generic_mii_ioctl); 412EXPORT_SYMBOL(generic_mii_ioctl);
398 413
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
new file mode 100644
index 000000000000..f79f7ee72ab8
--- /dev/null
+++ b/drivers/net/mipsnet.c
@@ -0,0 +1,371 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 */
6
7#define DEBUG
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/netdevice.h>
13#include <linux/sched.h>
14#include <linux/etherdevice.h>
15#include <linux/netdevice.h>
16#include <asm/io.h>
17#include <asm/mips-boards/simint.h>
18
19#include "mipsnet.h" /* actual device IO mapping */
20
21#define MIPSNET_VERSION "2005-06-20"
22
23#define mipsnet_reg_address(dev, field) (dev->base_addr + field_offset(field))
24
25struct mipsnet_priv {
26 struct net_device_stats stats;
27};
28
29static struct platform_device *mips_plat_dev;
30
31static char mipsnet_string[] = "mipsnet";
32
33/*
34 * Copy data from the MIPSNET rx data port
35 */
36static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata,
37 int len)
38{
39 uint32_t available_len = inl(mipsnet_reg_address(dev, rxDataCount));
40 if (available_len < len)
41 return -EFAULT;
42
43 for (; len > 0; len--, kdata++) {
44 *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer));
45 }
46
47 return inl(mipsnet_reg_address(dev, rxDataCount));
48}
49
50static inline ssize_t mipsnet_put_todevice(struct net_device *dev,
51 struct sk_buff *skb)
52{
53 int count_to_go = skb->len;
54 char *buf_ptr = skb->data;
55 struct mipsnet_priv *mp = netdev_priv(dev);
56
57 pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n",
58 dev->name, __FUNCTION__, skb->len);
59
60 outl(skb->len, mipsnet_reg_address(dev, txDataCount));
61
62 pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n",
63 dev->name, __FUNCTION__, skb->len);
64
65 for (; count_to_go; buf_ptr++, count_to_go--) {
66 outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer));
67 }
68
69 mp->stats.tx_packets++;
70 mp->stats.tx_bytes += skb->len;
71
72 return skb->len;
73}
74
75static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev)
76{
77 pr_debug("%s:%s(): transmitting %d bytes\n",
78 dev->name, __FUNCTION__, skb->len);
79
80 /* Only one packet at a time. Once TXDONE interrupt is serviced, the
81 * queue will be restarted.
82 */
83 netif_stop_queue(dev);
84 mipsnet_put_todevice(dev, skb);
85
86 return 0;
87}
88
89static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count)
90{
91 struct sk_buff *skb;
92 size_t len = count;
93 struct mipsnet_priv *mp = netdev_priv(dev);
94
95 if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) {
96 mp->stats.rx_dropped++;
97 return -ENOMEM;
98 }
99
100 skb_reserve(skb, 2);
101 if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len))
102 return -EFAULT;
103
104 skb->dev = dev;
105 skb->protocol = eth_type_trans(skb, dev);
106 skb->ip_summed = CHECKSUM_UNNECESSARY;
107
108 pr_debug("%s:%s(): pushing RXed data to kernel\n",
109 dev->name, __FUNCTION__);
110 netif_rx(skb);
111
112 mp->stats.rx_packets++;
113 mp->stats.rx_bytes += len;
114
115 return count;
116}
117
118static irqreturn_t
119mipsnet_interrupt(int irq, void *dev_id, struct pt_regs *regs)
120{
121 struct net_device *dev = dev_id;
122
123 irqreturn_t retval = IRQ_NONE;
124 uint64_t interruptFlags;
125
126 if (irq == dev->irq) {
127 pr_debug("%s:%s(): irq %d for device\n",
128 dev->name, __FUNCTION__, irq);
129
130 retval = IRQ_HANDLED;
131
132 interruptFlags =
133 inl(mipsnet_reg_address(dev, interruptControl));
134 pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name,
135 __FUNCTION__, interruptFlags);
136
137 if (interruptFlags & MIPSNET_INTCTL_TXDONE) {
138 pr_debug("%s:%s(): got TXDone\n",
139 dev->name, __FUNCTION__);
140 outl(MIPSNET_INTCTL_TXDONE,
141 mipsnet_reg_address(dev, interruptControl));
142 // only one packet at a time, we are done.
143 netif_wake_queue(dev);
144 } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) {
145 pr_debug("%s:%s(): got RX data\n",
146 dev->name, __FUNCTION__);
147 mipsnet_get_fromdev(dev,
148 inl(mipsnet_reg_address(dev, rxDataCount)));
149 pr_debug("%s:%s(): clearing RX int\n",
150 dev->name, __FUNCTION__);
151 outl(MIPSNET_INTCTL_RXDONE,
152 mipsnet_reg_address(dev, interruptControl));
153
154 } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) {
155 pr_debug("%s:%s(): got test interrupt\n",
156 dev->name, __FUNCTION__);
157 // TESTBIT is cleared on read.
158 // And takes effect after a write with 0
159 outl(0, mipsnet_reg_address(dev, interruptControl));
160 } else {
161 pr_debug("%s:%s(): no valid fags 0x%016llx\n",
162 dev->name, __FUNCTION__, interruptFlags);
163 // Maybe shared IRQ, just ignore, no clearing.
164 retval = IRQ_NONE;
165 }
166
167 } else {
168 printk(KERN_INFO "%s: %s(): irq %d for unknown device\n",
169 dev->name, __FUNCTION__, irq);
170 retval = IRQ_NONE;
171 }
172 return retval;
173} //mipsnet_interrupt()
174
175static int mipsnet_open(struct net_device *dev)
176{
177 int err;
178 pr_debug("%s: mipsnet_open\n", dev->name);
179
180 err = request_irq(dev->irq, &mipsnet_interrupt,
181 SA_SHIRQ, dev->name, (void *) dev);
182
183 if (err) {
184 pr_debug("%s: %s(): can't get irq %d\n",
185 dev->name, __FUNCTION__, dev->irq);
186 release_region(dev->base_addr, MIPSNET_IO_EXTENT);
187 return err;
188 }
189
190 pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n",
191 dev->name, __FUNCTION__, dev->base_addr, dev->irq);
192
193
194 netif_start_queue(dev);
195
196 // test interrupt handler
197 outl(MIPSNET_INTCTL_TESTBIT,
198 mipsnet_reg_address(dev, interruptControl));
199
200
201 return 0;
202}
203
204static int mipsnet_close(struct net_device *dev)
205{
206 pr_debug("%s: %s()\n", dev->name, __FUNCTION__);
207 netif_stop_queue(dev);
208 return 0;
209}
210
211static struct net_device_stats *mipsnet_get_stats(struct net_device *dev)
212{
213 struct mipsnet_priv *mp = netdev_priv(dev);
214
215 return &mp->stats;
216}
217
218static void mipsnet_set_mclist(struct net_device *dev)
219{
220 // we don't do anything
221 return;
222}
223
224static int __init mipsnet_probe(struct device *dev)
225{
226 struct net_device *netdev;
227 int err;
228
229 netdev = alloc_etherdev(sizeof(struct mipsnet_priv));
230 if (!netdev) {
231 err = -ENOMEM;
232 goto out;
233 }
234
235 dev_set_drvdata(dev, netdev);
236
237 netdev->open = mipsnet_open;
238 netdev->stop = mipsnet_close;
239 netdev->hard_start_xmit = mipsnet_xmit;
240 netdev->get_stats = mipsnet_get_stats;
241 netdev->set_multicast_list = mipsnet_set_mclist;
242
243 /*
244 * TODO: probe for these or load them from PARAM
245 */
246 netdev->base_addr = 0x4200;
247 netdev->irq = MIPSCPU_INT_BASE + MIPSCPU_INT_MB0 +
248 inl(mipsnet_reg_address(netdev, interruptInfo));
249
250 // Get the io region now, get irq on open()
251 if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) {
252 pr_debug("%s: %s(): IO region {start: 0x%04lux, len: %d} "
253 "for dev is not availble.\n", netdev->name,
254 __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT);
255 err = -EBUSY;
256 goto out_free_netdev;
257 }
258
259 /*
260 * Lacking any better mechanism to allocate a MAC address we use a
261 * random one ...
262 */
263 random_ether_addr(netdev->dev_addr);
264
265 err = register_netdev(netdev);
266 if (err) {
267 printk(KERN_ERR "MIPSNet: failed to register netdev.\n");
268 goto out_free_region;
269 }
270
271 return 0;
272
273out_free_region:
274 release_region(netdev->base_addr, MIPSNET_IO_EXTENT);
275
276out_free_netdev:
277 free_netdev(netdev);
278
279out:
280 return err;
281}
282
283static int __devexit mipsnet_device_remove(struct device *device)
284{
285 struct net_device *dev = dev_get_drvdata(device);
286
287 unregister_netdev(dev);
288 release_region(dev->base_addr, MIPSNET_IO_EXTENT);
289 free_netdev(dev);
290 dev_set_drvdata(device, NULL);
291
292 return 0;
293}
294
295static struct device_driver mipsnet_driver = {
296 .name = mipsnet_string,
297 .bus = &platform_bus_type,
298 .probe = mipsnet_probe,
299 .remove = __devexit_p(mipsnet_device_remove),
300};
301
302static void mipsnet_platform_release(struct device *device)
303{
304 struct platform_device *pldev;
305
306 /* free device */
307 pldev = to_platform_device(device);
308 kfree(pldev);
309}
310
311static int __init mipsnet_init_module(void)
312{
313 struct platform_device *pldev;
314 int err;
315
316 printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. "
317 "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION);
318
319 if (driver_register(&mipsnet_driver)) {
320 printk(KERN_ERR "Driver registration failed\n");
321 err = -ENODEV;
322 goto out;
323 }
324
325 if (!(pldev = kmalloc (sizeof (*pldev), GFP_KERNEL))) {
326 err = -ENOMEM;
327 goto out_unregister_driver;
328 }
329
330 memset (pldev, 0, sizeof (*pldev));
331 pldev->name = mipsnet_string;
332 pldev->id = 0;
333 pldev->dev.release = mipsnet_platform_release;
334
335 if (platform_device_register(pldev)) {
336 err = -ENODEV;
337 goto out_free_pldev;
338 }
339
340 if (!pldev->dev.driver) {
341 /*
342 * The driver was not bound to this device, there was
343 * no hardware at this address. Unregister it, as the
344 * release fuction will take care of freeing the
345 * allocated structure
346 */
347 platform_device_unregister (pldev);
348 }
349
350 mips_plat_dev = pldev;
351
352 return 0;
353
354out_free_pldev:
355 kfree(pldev);
356
357out_unregister_driver:
358 driver_unregister(&mipsnet_driver);
359out:
360 return err;
361}
362
363static void __exit mipsnet_exit_module(void)
364{
365 pr_debug("MIPSNet Ethernet driver exiting\n");
366
367 driver_unregister(&mipsnet_driver);
368}
369
370module_init(mipsnet_init_module);
371module_exit(mipsnet_exit_module);
diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h
new file mode 100644
index 000000000000..878535953cb1
--- /dev/null
+++ b/drivers/net/mipsnet.h
@@ -0,0 +1,127 @@
1//
2// <COPYRIGHT CLASS="1B" YEAR="2005">
3// Unpublished work (c) MIPS Technologies, Inc. All rights reserved.
4// Unpublished rights reserved under the copyright laws of the U.S.A. and
5// other countries.
6//
7// PROPRIETARY / SECRET CONFIDENTIAL INFORMATION OF MIPS TECHNOLOGIES, INC.
8// FOR INTERNAL USE ONLY.
9//
10// Under no circumstances (contract or otherwise) may this information be
11// disclosed to, or copied, modified or used by anyone other than employees
12// or contractors of MIPS Technologies having a need to know.
13// </COPYRIGHT>
14//
15//++
16// File: MIPS_Net.h
17//
18// Description:
19// The definition of the emulated MIPSNET device's interface.
20//
21// Notes: This include file needs to work from a Linux device drivers.
22//
23//--
24//
25
26#ifndef __MIPSNET_H
27#define __MIPSNET_H
28
29/*
30 * Id of this Net device, as seen by the core.
31 */
32#define MIPS_NET_DEV_ID ((uint64_t) \
33 ((uint64_t)'M'<< 0)| \
34 ((uint64_t)'I'<< 8)| \
35 ((uint64_t)'P'<<16)| \
36 ((uint64_t)'S'<<24)| \
37 ((uint64_t)'N'<<32)| \
38 ((uint64_t)'E'<<40)| \
39 ((uint64_t)'T'<<48)| \
40 ((uint64_t)'0'<<56))
41
42/*
43 * Net status/control block as seen by sw in the core.
44 * (Why not use bit fields? can't be bothered with cross-platform struct
45 * packing.)
46 */
47typedef struct _net_control_block {
48 /// dev info for probing
49 /// reads as MIPSNET%d where %d is some form of version
50 uint64_t devId; /*0x00 */
51
52 /*
53 * read only busy flag.
54 * Set and cleared by the Net Device to indicate that an rx or a tx
55 * is in progress.
56 */
57 uint32_t busy; /*0x08 */
58
59 /*
60 * Set by the Net Device.
61 * The device will set it once data has been received.
62 * The value is the number of bytes that should be read from
63 * rxDataBuffer. The value will decrease till 0 until all the data
64 * from rxDataBuffer has been read.
65 */
66 uint32_t rxDataCount; /*0x0c */
67#define MIPSNET_MAX_RXTX_DATACOUNT (1<<16)
68
69 /*
70 * Settable from the MIPS core, cleared by the Net Device.
71 * The core should set the number of bytes it wants to send,
72 * then it should write those bytes of data to txDataBuffer.
73 * The device will clear txDataCount has been processed (not necessarily sent).
74 */
75 uint32_t txDataCount; /*0x10 */
76
77 /*
78 * Interrupt control
79 *
80 * Used to clear the interrupted generated by this dev.
81 * Write a 1 to clear the interrupt. (except bit31).
82 *
83 * Bit0 is set if it was a tx-done interrupt.
84 * Bit1 is set when new rx-data is available.
85 * Until this bit is cleared there will be no other RXs.
86 *
87 * Bit31 is used for testing, it clears after a read.
88 * Writing 1 to this bit will cause an interrupt to be generated.
89 * To clear the test interrupt, write 0 to this register.
90 */
91 uint32_t interruptControl; /*0x14 */
92#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1<< 0))
93#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1<< 1))
94#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1<<31))
95#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE|MIPSNET_INTCTL_RXDONE|MIPSNET_INTCTL_TESTBIT)
96
97 /*
98 * Readonly core-specific interrupt info for the device to signal the core.
99 * The meaning of the contents of this field might change.
100 */
101 /*###\todo: the whole memIntf interrupt scheme is messy: the device should have
102 * no control what so ever of what VPE/register set is being used.
103 * The MemIntf should only expose interrupt lines, and something in the
104 * config should be responsible for the line<->core/vpe bindings.
105 */
106 uint32_t interruptInfo; /*0x18 */
107
108 /*
109 * This is where the received data is read out.
110 * There is more data to read until rxDataReady is 0.
111 * Only 1 byte at this regs offset is used.
112 */
113 uint32_t rxDataBuffer; /*0x1c */
114
115 /*
116 * This is where the data to transmit is written.
117 * Data should be written for the amount specified in the txDataCount register.
118 * Only 1 byte at this regs offset is used.
119 */
120 uint32_t txDataBuffer; /*0x20 */
121} MIPS_T_NetControl;
122
123#define MIPSNET_IO_EXTENT 0x40 /* being generous */
124
125#define field_offset(field) ((int)&((MIPS_T_NetControl*)(0))->field)
126
127#endif /* __MIPSNET_H */
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index f0996ce5c268..6c86dca62e2a 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -277,7 +277,7 @@ static void myri_init_rings(struct myri_eth *mp, int from_irq)
277 struct recvq __iomem *rq = mp->rq; 277 struct recvq __iomem *rq = mp->rq;
278 struct myri_rxd __iomem *rxd = &rq->myri_rxd[0]; 278 struct myri_rxd __iomem *rxd = &rq->myri_rxd[0];
279 struct net_device *dev = mp->dev; 279 struct net_device *dev = mp->dev;
280 int gfp_flags = GFP_KERNEL; 280 gfp_t gfp_flags = GFP_KERNEL;
281 int i; 281 int i;
282 282
283 if (from_irq || in_interrupt()) 283 if (from_irq || in_interrupt())
diff --git a/drivers/net/myri_sbus.h b/drivers/net/myri_sbus.h
index 9391e55a5e92..47722f708a41 100644
--- a/drivers/net/myri_sbus.h
+++ b/drivers/net/myri_sbus.h
@@ -296,7 +296,7 @@ struct myri_eth {
296/* We use this to acquire receive skb's that we can DMA directly into. */ 296/* We use this to acquire receive skb's that we can DMA directly into. */
297#define ALIGNED_RX_SKB_ADDR(addr) \ 297#define ALIGNED_RX_SKB_ADDR(addr) \
298 ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr)) 298 ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
299static inline struct sk_buff *myri_alloc_skb(unsigned int length, int gfp_flags) 299static inline struct sk_buff *myri_alloc_skb(unsigned int length, gfp_t gfp_flags)
300{ 300{
301 struct sk_buff *skb; 301 struct sk_buff *skb;
302 302
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index d209a1556b2e..0de8fdd2aa86 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -54,6 +54,10 @@ static const char version2[] =
54#include <asm/system.h> 54#include <asm/system.h>
55#include <asm/io.h> 55#include <asm/io.h>
56 56
57#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
58#include <asm/tx4938/rbtx4938.h>
59#endif
60
57#include "8390.h" 61#include "8390.h"
58 62
59#define DRV_NAME "ne" 63#define DRV_NAME "ne"
@@ -111,6 +115,9 @@ bad_clone_list[] __initdata = {
111 {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */ 115 {"E-LAN100", "E-LAN200", {0x00, 0x00, 0x5d}}, /* Broken ne1000 clones */
112 {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */ 116 {"PCM-4823", "PCM-4823", {0x00, 0xc0, 0x6c}}, /* Broken Advantech MoBo */
113 {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */ 117 {"REALTEK", "RTL8019", {0x00, 0x00, 0xe8}}, /* no-name with Realtek chip */
118#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
119 {"RBHMA4X00-RTL8019", "RBHMA4X00/RTL8019", {0x00, 0x60, 0x0a}}, /* Toshiba built-in */
120#endif
114 {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */ 121 {"LCS-8834", "LCS-8836", {0x04, 0x04, 0x37}}, /* ShinyNet (SET) */
115 {NULL,} 122 {NULL,}
116}; 123};
@@ -226,6 +233,10 @@ struct net_device * __init ne_probe(int unit)
226 sprintf(dev->name, "eth%d", unit); 233 sprintf(dev->name, "eth%d", unit);
227 netdev_boot_setup_check(dev); 234 netdev_boot_setup_check(dev);
228 235
236#ifdef CONFIG_TOSHIBA_RBTX4938
237 dev->base_addr = 0x07f20280;
238 dev->irq = RBTX4938_RTL_8019_IRQ;
239#endif
229 err = do_ne_probe(dev); 240 err = do_ne_probe(dev);
230 if (err) 241 if (err)
231 goto out; 242 goto out;
@@ -506,6 +517,10 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
506 ei_status.name = name; 517 ei_status.name = name;
507 ei_status.tx_start_page = start_page; 518 ei_status.tx_start_page = start_page;
508 ei_status.stop_page = stop_page; 519 ei_status.stop_page = stop_page;
520#if defined(CONFIG_TOSHIBA_RBTX4927) || defined(CONFIG_TOSHIBA_RBTX4938)
521 wordlength = 1;
522#endif
523
509#ifdef CONFIG_PLAT_OAKS32R 524#ifdef CONFIG_PLAT_OAKS32R
510 ei_status.word16 = 0; 525 ei_status.word16 = 0;
511#else 526#else
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index f1c01ac29102..d11821dd86ed 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -372,6 +372,7 @@ static int __devinit ne2k_pci_init_one (struct pci_dev *pdev,
372 printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":"); 372 printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":");
373 dev->dev_addr[i] = SA_prom[i]; 373 dev->dev_addr[i] = SA_prom[i];
374 } 374 }
375 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
375 376
376 return 0; 377 return 0;
377 378
@@ -637,6 +638,7 @@ static struct ethtool_ops ne2k_pci_ethtool_ops = {
637 .get_drvinfo = ne2k_pci_get_drvinfo, 638 .get_drvinfo = ne2k_pci_get_drvinfo,
638 .get_tx_csum = ethtool_op_get_tx_csum, 639 .get_tx_csum = ethtool_op_get_tx_csum,
639 .get_sg = ethtool_op_get_sg, 640 .get_sg = ethtool_op_get_sg,
641 .get_perm_addr = ethtool_op_get_perm_addr,
640}; 642};
641 643
642static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev) 644static void __devexit ne2k_pci_remove_one (struct pci_dev *pdev)
@@ -673,7 +675,6 @@ static int ne2k_pci_resume (struct pci_dev *pdev)
673 pci_set_power_state(pdev, 0); 675 pci_set_power_state(pdev, 0);
674 pci_restore_state(pdev); 676 pci_restore_state(pdev);
675 pci_enable_device(pdev); 677 pci_enable_device(pdev);
676 pci_set_master(pdev);
677 NS8390_init(dev, 1); 678 NS8390_init(dev, 1);
678 netif_device_attach(dev); 679 netif_device_attach(dev);
679 680
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 925d1dfcc4dc..bb42ff218484 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -696,8 +696,7 @@ static void ni65_free_buffer(struct priv *p)
696 return; 696 return;
697 697
698 for(i=0;i<TMDNUM;i++) { 698 for(i=0;i<TMDNUM;i++) {
699 if(p->tmdbounce[i]) 699 kfree(p->tmdbounce[i]);
700 kfree(p->tmdbounce[i]);
701#ifdef XMT_VIA_SKB 700#ifdef XMT_VIA_SKB
702 if(p->tmd_skb[i]) 701 if(p->tmd_skb[i])
703 dev_kfree_skb(p->tmd_skb[i]); 702 dev_kfree_skb(p->tmd_skb[i]);
@@ -710,12 +709,10 @@ static void ni65_free_buffer(struct priv *p)
710 if(p->recv_skb[i]) 709 if(p->recv_skb[i])
711 dev_kfree_skb(p->recv_skb[i]); 710 dev_kfree_skb(p->recv_skb[i]);
712#else 711#else
713 if(p->recvbounce[i]) 712 kfree(p->recvbounce[i]);
714 kfree(p->recvbounce[i]);
715#endif 713#endif
716 } 714 }
717 if(p->self) 715 kfree(p->self);
718 kfree(p->self);
719} 716}
720 717
721 718
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index e4811b42a6b7..a3c3fc9c0d8a 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1632,8 +1632,7 @@ static void ns83820_run_bist(struct net_device *ndev, const char *name, u32 enab
1632 timed_out = 1; 1632 timed_out = 1;
1633 break; 1633 break;
1634 } 1634 }
1635 set_current_state(TASK_UNINTERRUPTIBLE); 1635 schedule_timeout_uninterruptible(1);
1636 schedule_timeout(1);
1637 } 1636 }
1638 1637
1639 if (status & fail) 1638 if (status & fail)
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 9f22d138e3ad..818c185d6438 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1020,6 +1020,12 @@ static void set_misc_reg(struct net_device *dev)
1020 } else { 1020 } else {
1021 outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG); 1021 outb(full_duplex ? 4 : 0, nic_base + DLINK_DIAG);
1022 } 1022 }
1023 } else if (info->flags & IS_DL10019) {
1024 /* Advertise 100F, 100H, 10F, 10H */
1025 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
1026 /* Restart MII autonegotiation */
1027 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
1028 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
1023 } 1029 }
1024} 1030}
1025 1031
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 113b68099216..70fe81a89df9 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -22,8 +22,8 @@
22 *************************************************************************/ 22 *************************************************************************/
23 23
24#define DRV_NAME "pcnet32" 24#define DRV_NAME "pcnet32"
25#define DRV_VERSION "1.30j" 25#define DRV_VERSION "1.31a"
26#define DRV_RELDATE "29.04.2005" 26#define DRV_RELDATE "12.Sep.2005"
27#define PFX DRV_NAME ": " 27#define PFX DRV_NAME ": "
28 28
29static const char *version = 29static const char *version =
@@ -257,6 +257,9 @@ static int homepna[MAX_UNITS];
257 * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32. 257 * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32.
258 * v1.30i 28 Jun 2004 Don Fry change to use module_param. 258 * v1.30i 28 Jun 2004 Don Fry change to use module_param.
259 * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test. 259 * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test.
260 * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam().
261 * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4
262 * to allow loopback test to work unchanged.
260 */ 263 */
261 264
262 265
@@ -266,17 +269,17 @@ static int homepna[MAX_UNITS];
266 * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). 269 * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4).
267 */ 270 */
268#ifndef PCNET32_LOG_TX_BUFFERS 271#ifndef PCNET32_LOG_TX_BUFFERS
269#define PCNET32_LOG_TX_BUFFERS 4 272#define PCNET32_LOG_TX_BUFFERS 4
270#define PCNET32_LOG_RX_BUFFERS 5 273#define PCNET32_LOG_RX_BUFFERS 5
274#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
275#define PCNET32_LOG_MAX_RX_BUFFERS 9
271#endif 276#endif
272 277
273#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS)) 278#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
274#define TX_RING_MOD_MASK (TX_RING_SIZE - 1) 279#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
275#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12)
276 280
277#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) 281#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
278#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) 282#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
279#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4)
280 283
281#define PKT_BUF_SZ 1544 284#define PKT_BUF_SZ 1544
282 285
@@ -334,14 +337,14 @@ struct pcnet32_access {
334}; 337};
335 338
336/* 339/*
337 * The first three fields of pcnet32_private are read by the ethernet device 340 * The first field of pcnet32_private is read by the ethernet device
338 * so we allocate the structure should be allocated by pci_alloc_consistent(). 341 * so the structure should be allocated using pci_alloc_consistent().
339 */ 342 */
340struct pcnet32_private { 343struct pcnet32_private {
341 /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
342 struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
343 struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
344 struct pcnet32_init_block init_block; 344 struct pcnet32_init_block init_block;
345 /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
346 struct pcnet32_rx_head *rx_ring;
347 struct pcnet32_tx_head *tx_ring;
345 dma_addr_t dma_addr; /* DMA address of beginning of this 348 dma_addr_t dma_addr; /* DMA address of beginning of this
346 object, returned by 349 object, returned by
347 pci_alloc_consistent */ 350 pci_alloc_consistent */
@@ -349,13 +352,21 @@ struct pcnet32_private {
349 structure */ 352 structure */
350 const char *name; 353 const char *name;
351 /* The saved address of a sent-in-place packet/buffer, for skfree(). */ 354 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
352 struct sk_buff *tx_skbuff[TX_RING_SIZE]; 355 struct sk_buff **tx_skbuff;
353 struct sk_buff *rx_skbuff[RX_RING_SIZE]; 356 struct sk_buff **rx_skbuff;
354 dma_addr_t tx_dma_addr[TX_RING_SIZE]; 357 dma_addr_t *tx_dma_addr;
355 dma_addr_t rx_dma_addr[RX_RING_SIZE]; 358 dma_addr_t *rx_dma_addr;
356 struct pcnet32_access a; 359 struct pcnet32_access a;
357 spinlock_t lock; /* Guard lock */ 360 spinlock_t lock; /* Guard lock */
358 unsigned int cur_rx, cur_tx; /* The next free ring entry */ 361 unsigned int cur_rx, cur_tx; /* The next free ring entry */
362 unsigned int rx_ring_size; /* current rx ring size */
363 unsigned int tx_ring_size; /* current tx ring size */
364 unsigned int rx_mod_mask; /* rx ring modular mask */
365 unsigned int tx_mod_mask; /* tx ring modular mask */
366 unsigned short rx_len_bits;
367 unsigned short tx_len_bits;
368 dma_addr_t rx_ring_dma_addr;
369 dma_addr_t tx_ring_dma_addr;
359 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ 370 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
360 struct net_device_stats stats; 371 struct net_device_stats stats;
361 char tx_full; 372 char tx_full;
@@ -397,6 +408,9 @@ static int pcnet32_get_regs_len(struct net_device *dev);
397static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 408static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
398 void *ptr); 409 void *ptr);
399static void pcnet32_purge_tx_ring(struct net_device *dev); 410static void pcnet32_purge_tx_ring(struct net_device *dev);
411static int pcnet32_alloc_ring(struct net_device *dev);
412static void pcnet32_free_ring(struct net_device *dev);
413
400 414
401enum pci_flags_bit { 415enum pci_flags_bit {
402 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, 416 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
@@ -613,10 +627,62 @@ static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringpar
613{ 627{
614 struct pcnet32_private *lp = dev->priv; 628 struct pcnet32_private *lp = dev->priv;
615 629
616 ering->tx_max_pending = TX_RING_SIZE - 1; 630 ering->tx_max_pending = TX_MAX_RING_SIZE - 1;
617 ering->tx_pending = lp->cur_tx - lp->dirty_tx; 631 ering->tx_pending = lp->tx_ring_size - 1;
618 ering->rx_max_pending = RX_RING_SIZE - 1; 632 ering->rx_max_pending = RX_MAX_RING_SIZE - 1;
619 ering->rx_pending = lp->cur_rx & RX_RING_MOD_MASK; 633 ering->rx_pending = lp->rx_ring_size - 1;
634}
635
636static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
637{
638 struct pcnet32_private *lp = dev->priv;
639 unsigned long flags;
640 int i;
641
642 if (ering->rx_mini_pending || ering->rx_jumbo_pending)
643 return -EINVAL;
644
645 if (netif_running(dev))
646 pcnet32_close(dev);
647
648 spin_lock_irqsave(&lp->lock, flags);
649 pcnet32_free_ring(dev);
650 lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE);
651 lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE);
652
653 /* set the minimum ring size to 4, to allow the loopback test to work
654 * unchanged.
655 */
656 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
657 if (lp->tx_ring_size <= (1 << i))
658 break;
659 }
660 lp->tx_ring_size = (1 << i);
661 lp->tx_mod_mask = lp->tx_ring_size - 1;
662 lp->tx_len_bits = (i << 12);
663
664 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
665 if (lp->rx_ring_size <= (1 << i))
666 break;
667 }
668 lp->rx_ring_size = (1 << i);
669 lp->rx_mod_mask = lp->rx_ring_size - 1;
670 lp->rx_len_bits = (i << 4);
671
672 if (pcnet32_alloc_ring(dev)) {
673 pcnet32_free_ring(dev);
674 return -ENOMEM;
675 }
676
677 spin_unlock_irqrestore(&lp->lock, flags);
678
679 if (pcnet32_debug & NETIF_MSG_DRV)
680 printk(KERN_INFO PFX "Ring Param Settings: RX: %d, TX: %d\n", lp->rx_ring_size, lp->tx_ring_size);
681
682 if (netif_running(dev))
683 pcnet32_open(dev);
684
685 return 0;
620} 686}
621 687
622static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data) 688static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -948,6 +1014,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
948 .nway_reset = pcnet32_nway_reset, 1014 .nway_reset = pcnet32_nway_reset,
949 .get_link = pcnet32_get_link, 1015 .get_link = pcnet32_get_link,
950 .get_ringparam = pcnet32_get_ringparam, 1016 .get_ringparam = pcnet32_get_ringparam,
1017 .set_ringparam = pcnet32_set_ringparam,
951 .get_tx_csum = ethtool_op_get_tx_csum, 1018 .get_tx_csum = ethtool_op_get_tx_csum,
952 .get_sg = ethtool_op_get_sg, 1019 .get_sg = ethtool_op_get_sg,
953 .get_tso = ethtool_op_get_tso, 1020 .get_tso = ethtool_op_get_tso,
@@ -957,6 +1024,7 @@ static struct ethtool_ops pcnet32_ethtool_ops = {
957 .phys_id = pcnet32_phys_id, 1024 .phys_id = pcnet32_phys_id,
958 .get_regs_len = pcnet32_get_regs_len, 1025 .get_regs_len = pcnet32_get_regs_len,
959 .get_regs = pcnet32_get_regs, 1026 .get_regs = pcnet32_get_regs,
1027 .get_perm_addr = ethtool_op_get_perm_addr,
960}; 1028};
961 1029
962/* only probes for non-PCI devices, the rest are handled by 1030/* only probes for non-PCI devices, the rest are handled by
@@ -1185,9 +1253,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1185 memcpy(dev->dev_addr, promaddr, 6); 1253 memcpy(dev->dev_addr, promaddr, 6);
1186 } 1254 }
1187 } 1255 }
1256 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1188 1257
1189 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ 1258 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1190 if (!is_valid_ether_addr(dev->dev_addr)) 1259 if (!is_valid_ether_addr(dev->perm_addr))
1191 memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); 1260 memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
1192 1261
1193 if (pcnet32_debug & NETIF_MSG_PROBE) { 1262 if (pcnet32_debug & NETIF_MSG_PROBE) {
@@ -1239,6 +1308,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1239 dev->priv = lp; 1308 dev->priv = lp;
1240 lp->name = chipname; 1309 lp->name = chipname;
1241 lp->shared_irq = shared; 1310 lp->shared_irq = shared;
1311 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
1312 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
1313 lp->tx_mod_mask = lp->tx_ring_size - 1;
1314 lp->rx_mod_mask = lp->rx_ring_size - 1;
1315 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1316 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1242 lp->mii_if.full_duplex = fdx; 1317 lp->mii_if.full_duplex = fdx;
1243 lp->mii_if.phy_id_mask = 0x1f; 1318 lp->mii_if.phy_id_mask = 0x1f;
1244 lp->mii_if.reg_num_mask = 0x1f; 1319 lp->mii_if.reg_num_mask = 0x1f;
@@ -1265,21 +1340,23 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1265 } 1340 }
1266 lp->a = *a; 1341 lp->a = *a;
1267 1342
1343 if (pcnet32_alloc_ring(dev)) {
1344 ret = -ENOMEM;
1345 goto err_free_ring;
1346 }
1268 /* detect special T1/E1 WAN card by checking for MAC address */ 1347 /* detect special T1/E1 WAN card by checking for MAC address */
1269 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 1348 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0
1270 && dev->dev_addr[2] == 0x75) 1349 && dev->dev_addr[2] == 0x75)
1271 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; 1350 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1272 1351
1273 lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ 1352 lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
1274 lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS); 1353 lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1275 for (i = 0; i < 6; i++) 1354 for (i = 0; i < 6; i++)
1276 lp->init_block.phys_addr[i] = dev->dev_addr[i]; 1355 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1277 lp->init_block.filter[0] = 0x00000000; 1356 lp->init_block.filter[0] = 0x00000000;
1278 lp->init_block.filter[1] = 0x00000000; 1357 lp->init_block.filter[1] = 0x00000000;
1279 lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr + 1358 lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
1280 offsetof(struct pcnet32_private, rx_ring)); 1359 lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
1281 lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
1282 offsetof(struct pcnet32_private, tx_ring));
1283 1360
1284 /* switch pcnet32 to 32bit mode */ 1361 /* switch pcnet32 to 32bit mode */
1285 a->write_bcr(ioaddr, 20, 2); 1362 a->write_bcr(ioaddr, 20, 2);
@@ -1310,7 +1387,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1310 if (pcnet32_debug & NETIF_MSG_PROBE) 1387 if (pcnet32_debug & NETIF_MSG_PROBE)
1311 printk(", failed to detect IRQ line.\n"); 1388 printk(", failed to detect IRQ line.\n");
1312 ret = -ENODEV; 1389 ret = -ENODEV;
1313 goto err_free_consistent; 1390 goto err_free_ring;
1314 } 1391 }
1315 if (pcnet32_debug & NETIF_MSG_PROBE) 1392 if (pcnet32_debug & NETIF_MSG_PROBE)
1316 printk(", probed IRQ %d.\n", dev->irq); 1393 printk(", probed IRQ %d.\n", dev->irq);
@@ -1341,7 +1418,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1341 1418
1342 /* Fill in the generic fields of the device structure. */ 1419 /* Fill in the generic fields of the device structure. */
1343 if (register_netdev(dev)) 1420 if (register_netdev(dev))
1344 goto err_free_consistent; 1421 goto err_free_ring;
1345 1422
1346 if (pdev) { 1423 if (pdev) {
1347 pci_set_drvdata(pdev, dev); 1424 pci_set_drvdata(pdev, dev);
@@ -1359,6 +1436,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1359 1436
1360 return 0; 1437 return 0;
1361 1438
1439err_free_ring:
1440 pcnet32_free_ring(dev);
1362err_free_consistent: 1441err_free_consistent:
1363 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); 1442 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
1364err_free_netdev: 1443err_free_netdev:
@@ -1369,6 +1448,86 @@ err_release_region:
1369} 1448}
1370 1449
1371 1450
1451static int pcnet32_alloc_ring(struct net_device *dev)
1452{
1453 struct pcnet32_private *lp = dev->priv;
1454
1455 if ((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
1456 &lp->tx_ring_dma_addr)) == NULL) {
1457 if (pcnet32_debug & NETIF_MSG_DRV)
1458 printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
1459 return -ENOMEM;
1460 }
1461
1462 if ((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
1463 &lp->rx_ring_dma_addr)) == NULL) {
1464 if (pcnet32_debug & NETIF_MSG_DRV)
1465 printk(KERN_ERR PFX "Consistent memory allocation failed.\n");
1466 return -ENOMEM;
1467 }
1468
1469 if (!(lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, GFP_ATOMIC))) {
1470 if (pcnet32_debug & NETIF_MSG_DRV)
1471 printk(KERN_ERR PFX "Memory allocation failed.\n");
1472 return -ENOMEM;
1473 }
1474 memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size);
1475
1476 if (!(lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, GFP_ATOMIC))) {
1477 if (pcnet32_debug & NETIF_MSG_DRV)
1478 printk(KERN_ERR PFX "Memory allocation failed.\n");
1479 return -ENOMEM;
1480 }
1481 memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size);
1482
1483 if (!(lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, GFP_ATOMIC))) {
1484 if (pcnet32_debug & NETIF_MSG_DRV)
1485 printk(KERN_ERR PFX "Memory allocation failed.\n");
1486 return -ENOMEM;
1487 }
1488 memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size);
1489
1490 if (!(lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, GFP_ATOMIC))) {
1491 if (pcnet32_debug & NETIF_MSG_DRV)
1492 printk(KERN_ERR PFX "Memory allocation failed.\n");
1493 return -ENOMEM;
1494 }
1495 memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size);
1496
1497 return 0;
1498}
1499
1500
1501static void pcnet32_free_ring(struct net_device *dev)
1502{
1503 struct pcnet32_private *lp = dev->priv;
1504
1505 kfree(lp->tx_skbuff);
1506 lp->tx_skbuff = NULL;
1507
1508 kfree(lp->rx_skbuff);
1509 lp->rx_skbuff = NULL;
1510
1511 kfree(lp->tx_dma_addr);
1512 lp->tx_dma_addr = NULL;
1513
1514 kfree(lp->rx_dma_addr);
1515 lp->rx_dma_addr = NULL;
1516
1517 if (lp->tx_ring) {
1518 pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
1519 lp->tx_ring, lp->tx_ring_dma_addr);
1520 lp->tx_ring = NULL;
1521 }
1522
1523 if (lp->rx_ring) {
1524 pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
1525 lp->rx_ring, lp->rx_ring_dma_addr);
1526 lp->rx_ring = NULL;
1527 }
1528}
1529
1530
1372static int 1531static int
1373pcnet32_open(struct net_device *dev) 1532pcnet32_open(struct net_device *dev)
1374{ 1533{
@@ -1400,8 +1559,8 @@ pcnet32_open(struct net_device *dev)
1400 if (netif_msg_ifup(lp)) 1559 if (netif_msg_ifup(lp))
1401 printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", 1560 printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
1402 dev->name, dev->irq, 1561 dev->name, dev->irq,
1403 (u32) (lp->dma_addr + offsetof(struct pcnet32_private, tx_ring)), 1562 (u32) (lp->tx_ring_dma_addr),
1404 (u32) (lp->dma_addr + offsetof(struct pcnet32_private, rx_ring)), 1563 (u32) (lp->rx_ring_dma_addr),
1405 (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block))); 1564 (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block)));
1406 1565
1407 /* set/reset autoselect bit */ 1566 /* set/reset autoselect bit */
@@ -1521,7 +1680,7 @@ pcnet32_open(struct net_device *dev)
1521 1680
1522err_free_ring: 1681err_free_ring:
1523 /* free any allocated skbuffs */ 1682 /* free any allocated skbuffs */
1524 for (i = 0; i < RX_RING_SIZE; i++) { 1683 for (i = 0; i < lp->rx_ring_size; i++) {
1525 lp->rx_ring[i].status = 0; 1684 lp->rx_ring[i].status = 0;
1526 if (lp->rx_skbuff[i]) { 1685 if (lp->rx_skbuff[i]) {
1527 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, 1686 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2,
@@ -1531,6 +1690,9 @@ err_free_ring:
1531 lp->rx_skbuff[i] = NULL; 1690 lp->rx_skbuff[i] = NULL;
1532 lp->rx_dma_addr[i] = 0; 1691 lp->rx_dma_addr[i] = 0;
1533 } 1692 }
1693
1694 pcnet32_free_ring(dev);
1695
1534 /* 1696 /*
1535 * Switch back to 16bit mode to avoid problems with dumb 1697 * Switch back to 16bit mode to avoid problems with dumb
1536 * DOS packet driver after a warm reboot 1698 * DOS packet driver after a warm reboot
@@ -1562,7 +1724,7 @@ pcnet32_purge_tx_ring(struct net_device *dev)
1562 struct pcnet32_private *lp = dev->priv; 1724 struct pcnet32_private *lp = dev->priv;
1563 int i; 1725 int i;
1564 1726
1565 for (i = 0; i < TX_RING_SIZE; i++) { 1727 for (i = 0; i < lp->tx_ring_size; i++) {
1566 lp->tx_ring[i].status = 0; /* CPU owns buffer */ 1728 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1567 wmb(); /* Make sure adapter sees owner change */ 1729 wmb(); /* Make sure adapter sees owner change */
1568 if (lp->tx_skbuff[i]) { 1730 if (lp->tx_skbuff[i]) {
@@ -1587,7 +1749,7 @@ pcnet32_init_ring(struct net_device *dev)
1587 lp->cur_rx = lp->cur_tx = 0; 1749 lp->cur_rx = lp->cur_tx = 0;
1588 lp->dirty_rx = lp->dirty_tx = 0; 1750 lp->dirty_rx = lp->dirty_tx = 0;
1589 1751
1590 for (i = 0; i < RX_RING_SIZE; i++) { 1752 for (i = 0; i < lp->rx_ring_size; i++) {
1591 struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; 1753 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
1592 if (rx_skbuff == NULL) { 1754 if (rx_skbuff == NULL) {
1593 if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) { 1755 if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) {
@@ -1611,20 +1773,18 @@ pcnet32_init_ring(struct net_device *dev)
1611 } 1773 }
1612 /* The Tx buffer address is filled in as needed, but we do need to clear 1774 /* The Tx buffer address is filled in as needed, but we do need to clear
1613 * the upper ownership bit. */ 1775 * the upper ownership bit. */
1614 for (i = 0; i < TX_RING_SIZE; i++) { 1776 for (i = 0; i < lp->tx_ring_size; i++) {
1615 lp->tx_ring[i].status = 0; /* CPU owns buffer */ 1777 lp->tx_ring[i].status = 0; /* CPU owns buffer */
1616 wmb(); /* Make sure adapter sees owner change */ 1778 wmb(); /* Make sure adapter sees owner change */
1617 lp->tx_ring[i].base = 0; 1779 lp->tx_ring[i].base = 0;
1618 lp->tx_dma_addr[i] = 0; 1780 lp->tx_dma_addr[i] = 0;
1619 } 1781 }
1620 1782
1621 lp->init_block.tlen_rlen = le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS); 1783 lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits);
1622 for (i = 0; i < 6; i++) 1784 for (i = 0; i < 6; i++)
1623 lp->init_block.phys_addr[i] = dev->dev_addr[i]; 1785 lp->init_block.phys_addr[i] = dev->dev_addr[i];
1624 lp->init_block.rx_ring = (u32)le32_to_cpu(lp->dma_addr + 1786 lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr);
1625 offsetof(struct pcnet32_private, rx_ring)); 1787 lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr);
1626 lp->init_block.tx_ring = (u32)le32_to_cpu(lp->dma_addr +
1627 offsetof(struct pcnet32_private, tx_ring));
1628 wmb(); /* Make sure all changes are visible */ 1788 wmb(); /* Make sure all changes are visible */
1629 return 0; 1789 return 0;
1630} 1790}
@@ -1682,13 +1842,13 @@ pcnet32_tx_timeout (struct net_device *dev)
1682 printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", 1842 printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
1683 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", 1843 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
1684 lp->cur_rx); 1844 lp->cur_rx);
1685 for (i = 0 ; i < RX_RING_SIZE; i++) 1845 for (i = 0 ; i < lp->rx_ring_size; i++)
1686 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", 1846 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
1687 le32_to_cpu(lp->rx_ring[i].base), 1847 le32_to_cpu(lp->rx_ring[i].base),
1688 (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff, 1848 (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff,
1689 le32_to_cpu(lp->rx_ring[i].msg_length), 1849 le32_to_cpu(lp->rx_ring[i].msg_length),
1690 le16_to_cpu(lp->rx_ring[i].status)); 1850 le16_to_cpu(lp->rx_ring[i].status));
1691 for (i = 0 ; i < TX_RING_SIZE; i++) 1851 for (i = 0 ; i < lp->tx_ring_size; i++)
1692 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", 1852 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
1693 le32_to_cpu(lp->tx_ring[i].base), 1853 le32_to_cpu(lp->tx_ring[i].base),
1694 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, 1854 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
@@ -1729,7 +1889,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
1729 /* Fill in a Tx ring entry */ 1889 /* Fill in a Tx ring entry */
1730 1890
1731 /* Mask to ring buffer boundary. */ 1891 /* Mask to ring buffer boundary. */
1732 entry = lp->cur_tx & TX_RING_MOD_MASK; 1892 entry = lp->cur_tx & lp->tx_mod_mask;
1733 1893
1734 /* Caution: the write order is important here, set the status 1894 /* Caution: the write order is important here, set the status
1735 * with the "ownership" bits last. */ 1895 * with the "ownership" bits last. */
@@ -1753,7 +1913,7 @@ pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev)
1753 1913
1754 dev->trans_start = jiffies; 1914 dev->trans_start = jiffies;
1755 1915
1756 if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK].base != 0) { 1916 if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) {
1757 lp->tx_full = 1; 1917 lp->tx_full = 1;
1758 netif_stop_queue(dev); 1918 netif_stop_queue(dev);
1759 } 1919 }
@@ -1806,7 +1966,7 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1806 int delta; 1966 int delta;
1807 1967
1808 while (dirty_tx != lp->cur_tx) { 1968 while (dirty_tx != lp->cur_tx) {
1809 int entry = dirty_tx & TX_RING_MOD_MASK; 1969 int entry = dirty_tx & lp->tx_mod_mask;
1810 int status = (short)le16_to_cpu(lp->tx_ring[entry].status); 1970 int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
1811 1971
1812 if (status < 0) 1972 if (status < 0)
@@ -1864,18 +2024,18 @@ pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1864 dirty_tx++; 2024 dirty_tx++;
1865 } 2025 }
1866 2026
1867 delta = (lp->cur_tx - dirty_tx) & (TX_RING_MOD_MASK + TX_RING_SIZE); 2027 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
1868 if (delta > TX_RING_SIZE) { 2028 if (delta > lp->tx_ring_size) {
1869 if (netif_msg_drv(lp)) 2029 if (netif_msg_drv(lp))
1870 printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", 2030 printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1871 dev->name, dirty_tx, lp->cur_tx, lp->tx_full); 2031 dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
1872 dirty_tx += TX_RING_SIZE; 2032 dirty_tx += lp->tx_ring_size;
1873 delta -= TX_RING_SIZE; 2033 delta -= lp->tx_ring_size;
1874 } 2034 }
1875 2035
1876 if (lp->tx_full && 2036 if (lp->tx_full &&
1877 netif_queue_stopped(dev) && 2037 netif_queue_stopped(dev) &&
1878 delta < TX_RING_SIZE - 2) { 2038 delta < lp->tx_ring_size - 2) {
1879 /* The ring is no longer full, clear tbusy. */ 2039 /* The ring is no longer full, clear tbusy. */
1880 lp->tx_full = 0; 2040 lp->tx_full = 0;
1881 netif_wake_queue (dev); 2041 netif_wake_queue (dev);
@@ -1932,8 +2092,8 @@ static int
1932pcnet32_rx(struct net_device *dev) 2092pcnet32_rx(struct net_device *dev)
1933{ 2093{
1934 struct pcnet32_private *lp = dev->priv; 2094 struct pcnet32_private *lp = dev->priv;
1935 int entry = lp->cur_rx & RX_RING_MOD_MASK; 2095 int entry = lp->cur_rx & lp->rx_mod_mask;
1936 int boguscnt = RX_RING_SIZE / 2; 2096 int boguscnt = lp->rx_ring_size / 2;
1937 2097
1938 /* If we own the next entry, it's a new packet. Send it up. */ 2098 /* If we own the next entry, it's a new packet. Send it up. */
1939 while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { 2099 while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
@@ -1998,12 +2158,12 @@ pcnet32_rx(struct net_device *dev)
1998 if (netif_msg_drv(lp)) 2158 if (netif_msg_drv(lp))
1999 printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n", 2159 printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n",
2000 dev->name); 2160 dev->name);
2001 for (i = 0; i < RX_RING_SIZE; i++) 2161 for (i = 0; i < lp->rx_ring_size; i++)
2002 if ((short)le16_to_cpu(lp->rx_ring[(entry+i) 2162 if ((short)le16_to_cpu(lp->rx_ring[(entry+i)
2003 & RX_RING_MOD_MASK].status) < 0) 2163 & lp->rx_mod_mask].status) < 0)
2004 break; 2164 break;
2005 2165
2006 if (i > RX_RING_SIZE -2) { 2166 if (i > lp->rx_ring_size -2) {
2007 lp->stats.rx_dropped++; 2167 lp->stats.rx_dropped++;
2008 lp->rx_ring[entry].status |= le16_to_cpu(0x8000); 2168 lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
2009 wmb(); /* Make sure adapter sees owner change */ 2169 wmb(); /* Make sure adapter sees owner change */
@@ -2041,7 +2201,7 @@ pcnet32_rx(struct net_device *dev)
2041 lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ); 2201 lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
2042 wmb(); /* Make sure owner changes after all others are visible */ 2202 wmb(); /* Make sure owner changes after all others are visible */
2043 lp->rx_ring[entry].status |= le16_to_cpu(0x8000); 2203 lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
2044 entry = (++lp->cur_rx) & RX_RING_MOD_MASK; 2204 entry = (++lp->cur_rx) & lp->rx_mod_mask;
2045 if (--boguscnt <= 0) break; /* don't stay in loop forever */ 2205 if (--boguscnt <= 0) break; /* don't stay in loop forever */
2046 } 2206 }
2047 2207
@@ -2084,7 +2244,7 @@ pcnet32_close(struct net_device *dev)
2084 spin_lock_irqsave(&lp->lock, flags); 2244 spin_lock_irqsave(&lp->lock, flags);
2085 2245
2086 /* free all allocated skbuffs */ 2246 /* free all allocated skbuffs */
2087 for (i = 0; i < RX_RING_SIZE; i++) { 2247 for (i = 0; i < lp->rx_ring_size; i++) {
2088 lp->rx_ring[i].status = 0; 2248 lp->rx_ring[i].status = 0;
2089 wmb(); /* Make sure adapter sees owner change */ 2249 wmb(); /* Make sure adapter sees owner change */
2090 if (lp->rx_skbuff[i]) { 2250 if (lp->rx_skbuff[i]) {
@@ -2096,7 +2256,7 @@ pcnet32_close(struct net_device *dev)
2096 lp->rx_dma_addr[i] = 0; 2256 lp->rx_dma_addr[i] = 0;
2097 } 2257 }
2098 2258
2099 for (i = 0; i < TX_RING_SIZE; i++) { 2259 for (i = 0; i < lp->tx_ring_size; i++) {
2100 lp->tx_ring[i].status = 0; /* CPU owns buffer */ 2260 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2101 wmb(); /* Make sure adapter sees owner change */ 2261 wmb(); /* Make sure adapter sees owner change */
2102 if (lp->tx_skbuff[i]) { 2262 if (lp->tx_skbuff[i]) {
@@ -2265,6 +2425,7 @@ static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2265 struct pcnet32_private *lp = dev->priv; 2425 struct pcnet32_private *lp = dev->priv;
2266 2426
2267 unregister_netdev(dev); 2427 unregister_netdev(dev);
2428 pcnet32_free_ring(dev);
2268 release_region(dev->base_addr, PCNET32_TOTAL_SIZE); 2429 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2269 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); 2430 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2270 free_netdev(dev); 2431 free_netdev(dev);
@@ -2340,6 +2501,7 @@ static void __exit pcnet32_cleanup_module(void)
2340 struct pcnet32_private *lp = pcnet32_dev->priv; 2501 struct pcnet32_private *lp = pcnet32_dev->priv;
2341 next_dev = lp->next; 2502 next_dev = lp->next;
2342 unregister_netdev(pcnet32_dev); 2503 unregister_netdev(pcnet32_dev);
2504 pcnet32_free_ring(pcnet32_dev);
2343 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); 2505 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
2344 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); 2506 pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr);
2345 free_netdev(pcnet32_dev); 2507 free_netdev(pcnet32_dev);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 14f4de1a8180..c782a6329805 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -12,14 +12,6 @@ config PHYLIB
12 devices. This option provides infrastructure for 12 devices. This option provides infrastructure for
13 managing PHY devices. 13 managing PHY devices.
14 14
15config PHYCONTROL
16 bool " Support for automatically handling PHY state changes"
17 depends on PHYLIB
18 help
19 Adds code to perform all the work for keeping PHY link
20 state (speed/duplex/etc) up-to-date. Also handles
21 interrupts.
22
23comment "MII PHY device drivers" 15comment "MII PHY device drivers"
24 depends on PHYLIB 16 depends on PHYLIB
25 17
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 90630672703d..ad93b0da87f0 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -133,13 +133,9 @@ static int mdio_bus_suspend(struct device * dev, pm_message_t state)
133 int ret = 0; 133 int ret = 0;
134 struct device_driver *drv = dev->driver; 134 struct device_driver *drv = dev->driver;
135 135
136 if (drv && drv->suspend) { 136 if (drv && drv->suspend)
137 ret = drv->suspend(dev, state, SUSPEND_DISABLE); 137 ret = drv->suspend(dev, state);
138 if (ret == 0) 138
139 ret = drv->suspend(dev, state, SUSPEND_SAVE_STATE);
140 if (ret == 0)
141 ret = drv->suspend(dev, state, SUSPEND_POWER_DOWN);
142 }
143 return ret; 139 return ret;
144} 140}
145 141
@@ -148,13 +144,9 @@ static int mdio_bus_resume(struct device * dev)
148 int ret = 0; 144 int ret = 0;
149 struct device_driver *drv = dev->driver; 145 struct device_driver *drv = dev->driver;
150 146
151 if (drv && drv->resume) { 147 if (drv && drv->resume)
152 ret = drv->resume(dev, RESUME_POWER_ON); 148 ret = drv->resume(dev);
153 if (ret == 0) 149
154 ret = drv->resume(dev, RESUME_RESTORE_STATE);
155 if (ret == 0)
156 ret = drv->resume(dev, RESUME_ENABLE);
157 }
158 return ret; 150 return ret;
159} 151}
160 152
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index d9e11f93bf3a..9209da9dde0d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -242,10 +242,6 @@ EXPORT_SYMBOL(phy_sanitize_settings);
242 * choose the next best ones from the ones selected, so we don't 242 * choose the next best ones from the ones selected, so we don't
243 * care if ethtool tries to give us bad values 243 * care if ethtool tries to give us bad values
244 * 244 *
245 * A note about the PHYCONTROL Layer. If you turn off
246 * CONFIG_PHYCONTROL, you will need to read the PHY status
247 * registers after this function completes, and update your
248 * controller manually.
249 */ 245 */
250int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) 246int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
251{ 247{
@@ -380,7 +376,6 @@ int phy_start_aneg(struct phy_device *phydev)
380 376
381 err = phydev->drv->config_aneg(phydev); 377 err = phydev->drv->config_aneg(phydev);
382 378
383#ifdef CONFIG_PHYCONTROL
384 if (err < 0) 379 if (err < 0)
385 goto out_unlock; 380 goto out_unlock;
386 381
@@ -395,14 +390,12 @@ int phy_start_aneg(struct phy_device *phydev)
395 } 390 }
396 391
397out_unlock: 392out_unlock:
398#endif
399 spin_unlock(&phydev->lock); 393 spin_unlock(&phydev->lock);
400 return err; 394 return err;
401} 395}
402EXPORT_SYMBOL(phy_start_aneg); 396EXPORT_SYMBOL(phy_start_aneg);
403 397
404 398
405#ifdef CONFIG_PHYCONTROL
406static void phy_change(void *data); 399static void phy_change(void *data);
407static void phy_timer(unsigned long data); 400static void phy_timer(unsigned long data);
408 401
@@ -868,4 +861,3 @@ static void phy_timer(unsigned long data)
868 mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ); 861 mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
869} 862}
870 863
871#endif /* CONFIG_PHYCONTROL */
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 33f7bdb5857c..6da1aa0706a1 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -101,7 +101,6 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
101 return dev; 101 return dev;
102} 102}
103 103
104#ifdef CONFIG_PHYCONTROL
105/* phy_prepare_link: 104/* phy_prepare_link:
106 * 105 *
107 * description: Tells the PHY infrastructure to handle the 106 * description: Tells the PHY infrastructure to handle the
@@ -160,8 +159,6 @@ void phy_disconnect(struct phy_device *phydev)
160} 159}
161EXPORT_SYMBOL(phy_disconnect); 160EXPORT_SYMBOL(phy_disconnect);
162 161
163#endif /* CONFIG_PHYCONTROL */
164
165/* phy_attach: 162/* phy_attach:
166 * 163 *
167 * description: Called by drivers to attach to a particular PHY 164 * description: Called by drivers to attach to a particular PHY
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0df7e92b0bf8..d3c9958b00d0 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -863,7 +863,7 @@ static int __init ppp_init(void)
863 err = PTR_ERR(ppp_class); 863 err = PTR_ERR(ppp_class);
864 goto out_chrdev; 864 goto out_chrdev;
865 } 865 }
866 class_device_create(ppp_class, MKDEV(PPP_MAJOR, 0), NULL, "ppp"); 866 class_device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
867 err = devfs_mk_cdev(MKDEV(PPP_MAJOR, 0), 867 err = devfs_mk_cdev(MKDEV(PPP_MAJOR, 0),
868 S_IFCHR|S_IRUSR|S_IWUSR, "ppp"); 868 S_IFCHR|S_IRUSR|S_IWUSR, "ppp");
869 if (err) 869 if (err)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index afb3f186b884..159b56a56ef4 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1027,6 +1027,7 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
1027 .get_strings = rtl8169_get_strings, 1027 .get_strings = rtl8169_get_strings,
1028 .get_stats_count = rtl8169_get_stats_count, 1028 .get_stats_count = rtl8169_get_stats_count,
1029 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1029 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1030 .get_perm_addr = ethtool_op_get_perm_addr,
1030}; 1031};
1031 1032
1032static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum, 1033static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum,
@@ -1511,6 +1512,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1511 /* Get MAC address. FIXME: read EEPROM */ 1512 /* Get MAC address. FIXME: read EEPROM */
1512 for (i = 0; i < MAC_ADDR_LEN; i++) 1513 for (i = 0; i < MAC_ADDR_LEN; i++)
1513 dev->dev_addr[i] = RTL_R8(MAC0 + i); 1514 dev->dev_addr[i] = RTL_R8(MAC0 + i);
1515 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1514 1516
1515 dev->open = rtl8169_open; 1517 dev->open = rtl8169_open;
1516 dev->hard_start_xmit = rtl8169_start_xmit; 1518 dev->hard_start_xmit = rtl8169_start_xmit;
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
new file mode 100644
index 000000000000..12cde0604580
--- /dev/null
+++ b/drivers/net/rionet.c
@@ -0,0 +1,574 @@
1/*
2 * rionet - Ethernet driver over RapidIO messaging services
3 *
4 * Copyright 2005 MontaVista Software, Inc.
5 * Matt Porter <mporter@kernel.crashing.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/dma-mapping.h>
16#include <linux/delay.h>
17#include <linux/rio.h>
18#include <linux/rio_drv.h>
19#include <linux/rio_ids.h>
20
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/skbuff.h>
24#include <linux/crc32.h>
25#include <linux/ethtool.h>
26
27#define DRV_NAME "rionet"
28#define DRV_VERSION "0.2"
29#define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
30#define DRV_DESC "Ethernet over RapidIO"
31
32MODULE_AUTHOR(DRV_AUTHOR);
33MODULE_DESCRIPTION(DRV_DESC);
34MODULE_LICENSE("GPL");
35
36#define RIONET_DEFAULT_MSGLEVEL \
37 (NETIF_MSG_DRV | \
38 NETIF_MSG_LINK | \
39 NETIF_MSG_RX_ERR | \
40 NETIF_MSG_TX_ERR)
41
42#define RIONET_DOORBELL_JOIN 0x1000
43#define RIONET_DOORBELL_LEAVE 0x1001
44
45#define RIONET_MAILBOX 0
46
47#define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
48#define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
49
50static LIST_HEAD(rionet_peers);
51
52struct rionet_private {
53 struct rio_mport *mport;
54 struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
55 struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
56 struct net_device_stats stats;
57 int rx_slot;
58 int tx_slot;
59 int tx_cnt;
60 int ack_slot;
61 spinlock_t lock;
62 spinlock_t tx_lock;
63 u32 msg_enable;
64};
65
66struct rionet_peer {
67 struct list_head node;
68 struct rio_dev *rdev;
69 struct resource *res;
70};
71
72static int rionet_check = 0;
73static int rionet_capable = 1;
74
75/*
76 * This is a fast lookup table for for translating TX
77 * Ethernet packets into a destination RIO device. It
78 * could be made into a hash table to save memory depending
79 * on system trade-offs.
80 */
81static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES];
82
83#define is_rionet_capable(pef, src_ops, dst_ops) \
84 ((pef & RIO_PEF_INB_MBOX) && \
85 (pef & RIO_PEF_INB_DOORBELL) && \
86 (src_ops & RIO_SRC_OPS_DOORBELL) && \
87 (dst_ops & RIO_DST_OPS_DOORBELL))
88#define dev_rionet_capable(dev) \
89 is_rionet_capable(dev->pef, dev->src_ops, dev->dst_ops)
90
91#define RIONET_MAC_MATCH(x) (*(u32 *)x == 0x00010001)
92#define RIONET_GET_DESTID(x) (*(u16 *)(x + 4))
93
94static struct net_device_stats *rionet_stats(struct net_device *ndev)
95{
96 struct rionet_private *rnet = ndev->priv;
97 return &rnet->stats;
98}
99
100static int rionet_rx_clean(struct net_device *ndev)
101{
102 int i;
103 int error = 0;
104 struct rionet_private *rnet = ndev->priv;
105 void *data;
106
107 i = rnet->rx_slot;
108
109 do {
110 if (!rnet->rx_skb[i])
111 continue;
112
113 if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
114 break;
115
116 rnet->rx_skb[i]->data = data;
117 skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
118 rnet->rx_skb[i]->dev = ndev;
119 rnet->rx_skb[i]->protocol =
120 eth_type_trans(rnet->rx_skb[i], ndev);
121 error = netif_rx(rnet->rx_skb[i]);
122
123 if (error == NET_RX_DROP) {
124 rnet->stats.rx_dropped++;
125 } else if (error == NET_RX_BAD) {
126 if (netif_msg_rx_err(rnet))
127 printk(KERN_WARNING "%s: bad rx packet\n",
128 DRV_NAME);
129 rnet->stats.rx_errors++;
130 } else {
131 rnet->stats.rx_packets++;
132 rnet->stats.rx_bytes += RIO_MAX_MSG_SIZE;
133 }
134
135 } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);
136
137 return i;
138}
139
140static void rionet_rx_fill(struct net_device *ndev, int end)
141{
142 int i;
143 struct rionet_private *rnet = ndev->priv;
144
145 i = rnet->rx_slot;
146 do {
147 rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);
148
149 if (!rnet->rx_skb[i])
150 break;
151
152 rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
153 rnet->rx_skb[i]->data);
154 } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);
155
156 rnet->rx_slot = i;
157}
158
159static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
160 struct rio_dev *rdev)
161{
162 struct rionet_private *rnet = ndev->priv;
163
164 rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
165 rnet->tx_skb[rnet->tx_slot] = skb;
166
167 rnet->stats.tx_packets++;
168 rnet->stats.tx_bytes += skb->len;
169
170 if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
171 netif_stop_queue(ndev);
172
173 ++rnet->tx_slot;
174 rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);
175
176 if (netif_msg_tx_queued(rnet))
177 printk(KERN_INFO "%s: queued skb %8.8x len %8.8x\n", DRV_NAME,
178 (u32) skb, skb->len);
179
180 return 0;
181}
182
183static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
184{
185 int i;
186 struct rionet_private *rnet = ndev->priv;
187 struct ethhdr *eth = (struct ethhdr *)skb->data;
188 u16 destid;
189 unsigned long flags;
190
191 local_irq_save(flags);
192 if (!spin_trylock(&rnet->tx_lock)) {
193 local_irq_restore(flags);
194 return NETDEV_TX_LOCKED;
195 }
196
197 if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
198 netif_stop_queue(ndev);
199 spin_unlock_irqrestore(&rnet->tx_lock, flags);
200 printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
201 ndev->name);
202 return NETDEV_TX_BUSY;
203 }
204
205 if (eth->h_dest[0] & 0x01) {
206 for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++)
207 if (rionet_active[i])
208 rionet_queue_tx_msg(skb, ndev,
209 rionet_active[i]);
210 } else if (RIONET_MAC_MATCH(eth->h_dest)) {
211 destid = RIONET_GET_DESTID(eth->h_dest);
212 if (rionet_active[destid])
213 rionet_queue_tx_msg(skb, ndev, rionet_active[destid]);
214 }
215
216 spin_unlock_irqrestore(&rnet->tx_lock, flags);
217
218 return 0;
219}
220
221static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
222 u16 info)
223{
224 struct net_device *ndev = dev_id;
225 struct rionet_private *rnet = ndev->priv;
226 struct rionet_peer *peer;
227
228 if (netif_msg_intr(rnet))
229 printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
230 DRV_NAME, sid, tid, info);
231 if (info == RIONET_DOORBELL_JOIN) {
232 if (!rionet_active[sid]) {
233 list_for_each_entry(peer, &rionet_peers, node) {
234 if (peer->rdev->destid == sid)
235 rionet_active[sid] = peer->rdev;
236 }
237 rio_mport_send_doorbell(mport, sid,
238 RIONET_DOORBELL_JOIN);
239 }
240 } else if (info == RIONET_DOORBELL_LEAVE) {
241 rionet_active[sid] = NULL;
242 } else {
243 if (netif_msg_intr(rnet))
244 printk(KERN_WARNING "%s: unhandled doorbell\n",
245 DRV_NAME);
246 }
247}
248
249static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
250{
251 int n;
252 struct net_device *ndev = dev_id;
253 struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
254
255 if (netif_msg_intr(rnet))
256 printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
257 DRV_NAME, mbox, slot);
258
259 spin_lock(&rnet->lock);
260 if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
261 rionet_rx_fill(ndev, n);
262 spin_unlock(&rnet->lock);
263}
264
265static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
266{
267 struct net_device *ndev = dev_id;
268 struct rionet_private *rnet = ndev->priv;
269
270 spin_lock(&rnet->lock);
271
272 if (netif_msg_intr(rnet))
273 printk(KERN_INFO
274 "%s: outbound message event, mbox %d slot %d\n",
275 DRV_NAME, mbox, slot);
276
277 while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
278 /* dma unmap single */
279 dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
280 rnet->tx_skb[rnet->ack_slot] = NULL;
281 ++rnet->ack_slot;
282 rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
283 rnet->tx_cnt--;
284 }
285
286 if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
287 netif_wake_queue(ndev);
288
289 spin_unlock(&rnet->lock);
290}
291
292static int rionet_open(struct net_device *ndev)
293{
294 int i, rc = 0;
295 struct rionet_peer *peer, *tmp;
296 u32 pwdcsr;
297 struct rionet_private *rnet = ndev->priv;
298
299 if (netif_msg_ifup(rnet))
300 printk(KERN_INFO "%s: open\n", DRV_NAME);
301
302 if ((rc = rio_request_inb_dbell(rnet->mport,
303 (void *)ndev,
304 RIONET_DOORBELL_JOIN,
305 RIONET_DOORBELL_LEAVE,
306 rionet_dbell_event)) < 0)
307 goto out;
308
309 if ((rc = rio_request_inb_mbox(rnet->mport,
310 (void *)ndev,
311 RIONET_MAILBOX,
312 RIONET_RX_RING_SIZE,
313 rionet_inb_msg_event)) < 0)
314 goto out;
315
316 if ((rc = rio_request_outb_mbox(rnet->mport,
317 (void *)ndev,
318 RIONET_MAILBOX,
319 RIONET_TX_RING_SIZE,
320 rionet_outb_msg_event)) < 0)
321 goto out;
322
323 /* Initialize inbound message ring */
324 for (i = 0; i < RIONET_RX_RING_SIZE; i++)
325 rnet->rx_skb[i] = NULL;
326 rnet->rx_slot = 0;
327 rionet_rx_fill(ndev, 0);
328
329 rnet->tx_slot = 0;
330 rnet->tx_cnt = 0;
331 rnet->ack_slot = 0;
332
333 netif_carrier_on(ndev);
334 netif_start_queue(ndev);
335
336 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
337 if (!(peer->res = rio_request_outb_dbell(peer->rdev,
338 RIONET_DOORBELL_JOIN,
339 RIONET_DOORBELL_LEAVE)))
340 {
341 printk(KERN_ERR "%s: error requesting doorbells\n",
342 DRV_NAME);
343 continue;
344 }
345
346 /*
347 * If device has initialized inbound doorbells,
348 * send a join message
349 */
350 rio_read_config_32(peer->rdev, RIO_WRITE_PORT_CSR, &pwdcsr);
351 if (pwdcsr & RIO_DOORBELL_AVAIL)
352 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
353 }
354
355 out:
356 return rc;
357}
358
359static int rionet_close(struct net_device *ndev)
360{
361 struct rionet_private *rnet = (struct rionet_private *)ndev->priv;
362 struct rionet_peer *peer, *tmp;
363 int i;
364
365 if (netif_msg_ifup(rnet))
366 printk(KERN_INFO "%s: close\n", DRV_NAME);
367
368 netif_stop_queue(ndev);
369 netif_carrier_off(ndev);
370
371 for (i = 0; i < RIONET_RX_RING_SIZE; i++)
372 if (rnet->rx_skb[i])
373 kfree_skb(rnet->rx_skb[i]);
374
375 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
376 if (rionet_active[peer->rdev->destid]) {
377 rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
378 rionet_active[peer->rdev->destid] = NULL;
379 }
380 rio_release_outb_dbell(peer->rdev, peer->res);
381 }
382
383 rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
384 RIONET_DOORBELL_LEAVE);
385 rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
386 rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);
387
388 return 0;
389}
390
391static void rionet_remove(struct rio_dev *rdev)
392{
393 struct net_device *ndev = NULL;
394 struct rionet_peer *peer, *tmp;
395
396 unregister_netdev(ndev);
397 kfree(ndev);
398
399 list_for_each_entry_safe(peer, tmp, &rionet_peers, node) {
400 list_del(&peer->node);
401 kfree(peer);
402 }
403}
404
405static void rionet_get_drvinfo(struct net_device *ndev,
406 struct ethtool_drvinfo *info)
407{
408 struct rionet_private *rnet = ndev->priv;
409
410 strcpy(info->driver, DRV_NAME);
411 strcpy(info->version, DRV_VERSION);
412 strcpy(info->fw_version, "n/a");
413 strcpy(info->bus_info, rnet->mport->name);
414}
415
416static u32 rionet_get_msglevel(struct net_device *ndev)
417{
418 struct rionet_private *rnet = ndev->priv;
419
420 return rnet->msg_enable;
421}
422
423static void rionet_set_msglevel(struct net_device *ndev, u32 value)
424{
425 struct rionet_private *rnet = ndev->priv;
426
427 rnet->msg_enable = value;
428}
429
430static struct ethtool_ops rionet_ethtool_ops = {
431 .get_drvinfo = rionet_get_drvinfo,
432 .get_msglevel = rionet_get_msglevel,
433 .set_msglevel = rionet_set_msglevel,
434 .get_link = ethtool_op_get_link,
435};
436
437static int rionet_setup_netdev(struct rio_mport *mport)
438{
439 int rc = 0;
440 struct net_device *ndev = NULL;
441 struct rionet_private *rnet;
442 u16 device_id;
443
444 /* Allocate our net_device structure */
445 ndev = alloc_etherdev(sizeof(struct rionet_private));
446 if (ndev == NULL) {
447 printk(KERN_INFO "%s: could not allocate ethernet device.\n",
448 DRV_NAME);
449 rc = -ENOMEM;
450 goto out;
451 }
452
453 /* Set up private area */
454 rnet = (struct rionet_private *)ndev->priv;
455 rnet->mport = mport;
456
457 /* Set the default MAC address */
458 device_id = rio_local_get_device_id(mport);
459 ndev->dev_addr[0] = 0x00;
460 ndev->dev_addr[1] = 0x01;
461 ndev->dev_addr[2] = 0x00;
462 ndev->dev_addr[3] = 0x01;
463 ndev->dev_addr[4] = device_id >> 8;
464 ndev->dev_addr[5] = device_id & 0xff;
465
466 /* Fill in the driver function table */
467 ndev->open = &rionet_open;
468 ndev->hard_start_xmit = &rionet_start_xmit;
469 ndev->stop = &rionet_close;
470 ndev->get_stats = &rionet_stats;
471 ndev->mtu = RIO_MAX_MSG_SIZE - 14;
472 ndev->features = NETIF_F_LLTX;
473 SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
474
475 SET_MODULE_OWNER(ndev);
476
477 spin_lock_init(&rnet->lock);
478 spin_lock_init(&rnet->tx_lock);
479
480 rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;
481
482 rc = register_netdev(ndev);
483 if (rc != 0)
484 goto out;
485
486 printk("%s: %s %s Version %s, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
487 ndev->name,
488 DRV_NAME,
489 DRV_DESC,
490 DRV_VERSION,
491 ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
492 ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
493
494 out:
495 return rc;
496}
497
498/*
499 * XXX Make multi-net safe
500 */
501static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
502{
503 int rc = -ENODEV;
504 u32 lpef, lsrc_ops, ldst_ops;
505 struct rionet_peer *peer;
506
507 /* If local device is not rionet capable, give up quickly */
508 if (!rionet_capable)
509 goto out;
510
511 /*
512 * First time through, make sure local device is rionet
513 * capable, setup netdev, and set flags so this is skipped
514 * on later probes
515 */
516 if (!rionet_check) {
517 rio_local_read_config_32(rdev->net->hport, RIO_PEF_CAR, &lpef);
518 rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
519 &lsrc_ops);
520 rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
521 &ldst_ops);
522 if (!is_rionet_capable(lpef, lsrc_ops, ldst_ops)) {
523 printk(KERN_ERR
524 "%s: local device is not network capable\n",
525 DRV_NAME);
526 rionet_check = 1;
527 rionet_capable = 0;
528 goto out;
529 }
530
531 rc = rionet_setup_netdev(rdev->net->hport);
532 rionet_check = 1;
533 }
534
535 /*
536 * If the remote device has mailbox/doorbell capabilities,
537 * add it to the peer list.
538 */
539 if (dev_rionet_capable(rdev)) {
540 if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
541 rc = -ENOMEM;
542 goto out;
543 }
544 peer->rdev = rdev;
545 list_add_tail(&peer->node, &rionet_peers);
546 }
547
548 out:
549 return rc;
550}
551
552static struct rio_device_id rionet_id_table[] = {
553 {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}
554};
555
556static struct rio_driver rionet_driver = {
557 .name = "rionet",
558 .id_table = rionet_id_table,
559 .probe = rionet_probe,
560 .remove = rionet_remove,
561};
562
563static int __init rionet_init(void)
564{
565 return rio_register_driver(&rionet_driver);
566}
567
568static void __exit rionet_exit(void)
569{
570 rio_unregister_driver(&rionet_driver);
571}
572
573module_init(rionet_init);
574module_exit(rionet_exit);
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index ec1a18d189a1..19c2df9c86fe 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1710,10 +1710,8 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1710 error = -EFAULT; 1710 error = -EFAULT;
1711 } 1711 }
1712 wf_out: 1712 wf_out:
1713 if (oldimage) 1713 kfree(oldimage);
1714 kfree(oldimage); 1714 kfree(image);
1715 if (image)
1716 kfree(image);
1717 return error; 1715 return error;
1718 1716
1719 case SIOCRRID: 1717 case SIOCRRID:
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 7cefe5507b9e..00179bc3437f 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -814,6 +814,17 @@ typedef struct _XENA_dev_config {
814 u64 rxgxs_ber_0; /* CHANGED */ 814 u64 rxgxs_ber_0; /* CHANGED */
815 u64 rxgxs_ber_1; /* CHANGED */ 815 u64 rxgxs_ber_1; /* CHANGED */
816 816
817 u64 spi_control;
818#define SPI_CONTROL_KEY(key) vBIT(key,0,4)
819#define SPI_CONTROL_BYTECNT(cnt) vBIT(cnt,29,3)
820#define SPI_CONTROL_CMD(cmd) vBIT(cmd,32,8)
821#define SPI_CONTROL_ADDR(addr) vBIT(addr,40,24)
822#define SPI_CONTROL_SEL1 BIT(4)
823#define SPI_CONTROL_REQ BIT(7)
824#define SPI_CONTROL_NACK BIT(5)
825#define SPI_CONTROL_DONE BIT(6)
826 u64 spi_data;
827#define SPI_DATA_WRITE(data,len) vBIT(data,0,len)
817} XENA_dev_config_t; 828} XENA_dev_config_t;
818 829
819#define XENA_REG_SPACE sizeof(XENA_dev_config_t) 830#define XENA_REG_SPACE sizeof(XENA_dev_config_t)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index dd451e099a4c..3f5e93aad5c7 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -65,9 +65,11 @@
65#include "s2io.h" 65#include "s2io.h"
66#include "s2io-regs.h" 66#include "s2io-regs.h"
67 67
68#define DRV_VERSION "Version 2.0.9.1"
69
68/* S2io Driver name & version. */ 70/* S2io Driver name & version. */
69static char s2io_driver_name[] = "Neterion"; 71static char s2io_driver_name[] = "Neterion";
70static char s2io_driver_version[] = "Version 2.0.8.1"; 72static char s2io_driver_version[] = DRV_VERSION;
71 73
72static inline int RXD_IS_UP2DT(RxD_t *rxdp) 74static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73{ 75{
@@ -307,6 +309,8 @@ static unsigned int indicate_max_pkts;
307#endif 309#endif
308/* Frequency of Rx desc syncs expressed as power of 2 */ 310/* Frequency of Rx desc syncs expressed as power of 2 */
309static unsigned int rxsync_frequency = 3; 311static unsigned int rxsync_frequency = 3;
312/* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
313static unsigned int intr_type = 0;
310 314
311/* 315/*
312 * S2IO device table. 316 * S2IO device table.
@@ -701,8 +705,7 @@ static void free_shared_mem(struct s2io_nic *nic)
701 } 705 }
702 kfree(mac_control->rings[i].ba[j]); 706 kfree(mac_control->rings[i].ba[j]);
703 } 707 }
704 if (mac_control->rings[i].ba) 708 kfree(mac_control->rings[i].ba);
705 kfree(mac_control->rings[i].ba);
706 } 709 }
707#endif 710#endif
708 711
@@ -1396,8 +1399,13 @@ static int init_nic(struct s2io_nic *nic)
1396 writeq(val64, &bar0->rti_data1_mem); 1399 writeq(val64, &bar0->rti_data1_mem);
1397 1400
1398 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) | 1401 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1399 RTI_DATA2_MEM_RX_UFC_B(0x2) | 1402 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1400 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80); 1403 if (nic->intr_type == MSI_X)
1404 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1405 RTI_DATA2_MEM_RX_UFC_D(0x40));
1406 else
1407 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1408 RTI_DATA2_MEM_RX_UFC_D(0x80));
1401 writeq(val64, &bar0->rti_data2_mem); 1409 writeq(val64, &bar0->rti_data2_mem);
1402 1410
1403 for (i = 0; i < config->rx_ring_num; i++) { 1411 for (i = 0; i < config->rx_ring_num; i++) {
@@ -1507,17 +1515,15 @@ static int init_nic(struct s2io_nic *nic)
1507#define LINK_UP_DOWN_INTERRUPT 1 1515#define LINK_UP_DOWN_INTERRUPT 1
1508#define MAC_RMAC_ERR_TIMER 2 1516#define MAC_RMAC_ERR_TIMER 2
1509 1517
1510#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1511#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1512#else
1513int s2io_link_fault_indication(nic_t *nic) 1518int s2io_link_fault_indication(nic_t *nic)
1514{ 1519{
1520 if (nic->intr_type != INTA)
1521 return MAC_RMAC_ERR_TIMER;
1515 if (nic->device_type == XFRAME_II_DEVICE) 1522 if (nic->device_type == XFRAME_II_DEVICE)
1516 return LINK_UP_DOWN_INTERRUPT; 1523 return LINK_UP_DOWN_INTERRUPT;
1517 else 1524 else
1518 return MAC_RMAC_ERR_TIMER; 1525 return MAC_RMAC_ERR_TIMER;
1519} 1526}
1520#endif
1521 1527
1522/** 1528/**
1523 * en_dis_able_nic_intrs - Enable or Disable the interrupts 1529 * en_dis_able_nic_intrs - Enable or Disable the interrupts
@@ -1941,11 +1947,14 @@ static int start_nic(struct s2io_nic *nic)
1941 } 1947 }
1942 1948
1943 /* Enable select interrupts */ 1949 /* Enable select interrupts */
1944 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 1950 if (nic->intr_type != INTA)
1945 interruptible |= TX_PIC_INTR | RX_PIC_INTR; 1951 en_dis_able_nic_intrs(nic, ENA_ALL_INTRS, DISABLE_INTRS);
1946 interruptible |= TX_MAC_INTR | RX_MAC_INTR; 1952 else {
1947 1953 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1948 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS); 1954 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1955 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1956 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1957 }
1949 1958
1950 /* 1959 /*
1951 * With some switches, link might be already up at this point. 1960 * With some switches, link might be already up at this point.
@@ -2633,11 +2642,11 @@ static void tx_intr_handler(fifo_info_t *fifo_data)
2633 err = txdlp->Control_1 & TXD_T_CODE; 2642 err = txdlp->Control_1 & TXD_T_CODE;
2634 if ((err >> 48) == 0xA) { 2643 if ((err >> 48) == 0xA) {
2635 DBG_PRINT(TX_DBG, "TxD returned due \ 2644 DBG_PRINT(TX_DBG, "TxD returned due \
2636 to loss of link\n"); 2645to loss of link\n");
2637 } 2646 }
2638 else { 2647 else {
2639 DBG_PRINT(ERR_DBG, "***TxD error \ 2648 DBG_PRINT(ERR_DBG, "***TxD error \
2640 %llx\n", err); 2649%llx\n", err);
2641 } 2650 }
2642 } 2651 }
2643 2652
@@ -2854,6 +2863,9 @@ void s2io_reset(nic_t * sp)
2854 /* Set swapper to enable I/O register access */ 2863 /* Set swapper to enable I/O register access */
2855 s2io_set_swapper(sp); 2864 s2io_set_swapper(sp);
2856 2865
2866 /* Restore the MSIX table entries from local variables */
2867 restore_xmsi_data(sp);
2868
2857 /* Clear certain PCI/PCI-X fields after reset */ 2869 /* Clear certain PCI/PCI-X fields after reset */
2858 if (sp->device_type == XFRAME_II_DEVICE) { 2870 if (sp->device_type == XFRAME_II_DEVICE) {
2859 /* Clear parity err detect bit */ 2871 /* Clear parity err detect bit */
@@ -2983,8 +2995,9 @@ int s2io_set_swapper(nic_t * sp)
2983 SWAPPER_CTRL_RXD_W_FE | 2995 SWAPPER_CTRL_RXD_W_FE |
2984 SWAPPER_CTRL_RXF_W_FE | 2996 SWAPPER_CTRL_RXF_W_FE |
2985 SWAPPER_CTRL_XMSI_FE | 2997 SWAPPER_CTRL_XMSI_FE |
2986 SWAPPER_CTRL_XMSI_SE |
2987 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); 2998 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2999 if (sp->intr_type == INTA)
3000 val64 |= SWAPPER_CTRL_XMSI_SE;
2988 writeq(val64, &bar0->swapper_ctrl); 3001 writeq(val64, &bar0->swapper_ctrl);
2989#else 3002#else
2990 /* 3003 /*
@@ -3005,8 +3018,9 @@ int s2io_set_swapper(nic_t * sp)
3005 SWAPPER_CTRL_RXD_W_SE | 3018 SWAPPER_CTRL_RXD_W_SE |
3006 SWAPPER_CTRL_RXF_W_FE | 3019 SWAPPER_CTRL_RXF_W_FE |
3007 SWAPPER_CTRL_XMSI_FE | 3020 SWAPPER_CTRL_XMSI_FE |
3008 SWAPPER_CTRL_XMSI_SE |
3009 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE); 3021 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3022 if (sp->intr_type == INTA)
3023 val64 |= SWAPPER_CTRL_XMSI_SE;
3010 writeq(val64, &bar0->swapper_ctrl); 3024 writeq(val64, &bar0->swapper_ctrl);
3011#endif 3025#endif
3012 val64 = readq(&bar0->swapper_ctrl); 3026 val64 = readq(&bar0->swapper_ctrl);
@@ -3028,6 +3042,201 @@ int s2io_set_swapper(nic_t * sp)
3028 return SUCCESS; 3042 return SUCCESS;
3029} 3043}
3030 3044
3045int wait_for_msix_trans(nic_t *nic, int i)
3046{
3047 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3048 u64 val64;
3049 int ret = 0, cnt = 0;
3050
3051 do {
3052 val64 = readq(&bar0->xmsi_access);
3053 if (!(val64 & BIT(15)))
3054 break;
3055 mdelay(1);
3056 cnt++;
3057 } while(cnt < 5);
3058 if (cnt == 5) {
3059 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3060 ret = 1;
3061 }
3062
3063 return ret;
3064}
3065
3066void restore_xmsi_data(nic_t *nic)
3067{
3068 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3069 u64 val64;
3070 int i;
3071
3072 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3073 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3074 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3075 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3076 writeq(val64, &bar0->xmsi_access);
3077 if (wait_for_msix_trans(nic, i)) {
3078 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3079 continue;
3080 }
3081 }
3082}
3083
3084void store_xmsi_data(nic_t *nic)
3085{
3086 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3087 u64 val64, addr, data;
3088 int i;
3089
3090 /* Store and display */
3091 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3092 val64 = (BIT(15) | vBIT(i, 26, 6));
3093 writeq(val64, &bar0->xmsi_access);
3094 if (wait_for_msix_trans(nic, i)) {
3095 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3096 continue;
3097 }
3098 addr = readq(&bar0->xmsi_address);
3099 data = readq(&bar0->xmsi_data);
3100 if (addr && data) {
3101 nic->msix_info[i].addr = addr;
3102 nic->msix_info[i].data = data;
3103 }
3104 }
3105}
3106
3107int s2io_enable_msi(nic_t *nic)
3108{
3109 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3110 u16 msi_ctrl, msg_val;
3111 struct config_param *config = &nic->config;
3112 struct net_device *dev = nic->dev;
3113 u64 val64, tx_mat, rx_mat;
3114 int i, err;
3115
3116 val64 = readq(&bar0->pic_control);
3117 val64 &= ~BIT(1);
3118 writeq(val64, &bar0->pic_control);
3119
3120 err = pci_enable_msi(nic->pdev);
3121 if (err) {
3122 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3123 nic->dev->name);
3124 return err;
3125 }
3126
3127 /*
3128 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3129 * for interrupt handling.
3130 */
3131 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3132 msg_val ^= 0x1;
3133 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3134 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3135
3136 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3137 msi_ctrl |= 0x10;
3138 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3139
3140 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3141 tx_mat = readq(&bar0->tx_mat0_n[0]);
3142 for (i=0; i<config->tx_fifo_num; i++) {
3143 tx_mat |= TX_MAT_SET(i, 1);
3144 }
3145 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3146
3147 rx_mat = readq(&bar0->rx_mat);
3148 for (i=0; i<config->rx_ring_num; i++) {
3149 rx_mat |= RX_MAT_SET(i, 1);
3150 }
3151 writeq(rx_mat, &bar0->rx_mat);
3152
3153 dev->irq = nic->pdev->irq;
3154 return 0;
3155}
3156
3157int s2io_enable_msi_x(nic_t *nic)
3158{
3159 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3160 u64 tx_mat, rx_mat;
3161 u16 msi_control; /* Temp variable */
3162 int ret, i, j, msix_indx = 1;
3163
3164 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3165 GFP_KERNEL);
3166 if (nic->entries == NULL) {
3167 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3168 return -ENOMEM;
3169 }
3170 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3171
3172 nic->s2io_entries =
3173 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3174 GFP_KERNEL);
3175 if (nic->s2io_entries == NULL) {
3176 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3177 kfree(nic->entries);
3178 return -ENOMEM;
3179 }
3180 memset(nic->s2io_entries, 0,
3181 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3182
3183 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3184 nic->entries[i].entry = i;
3185 nic->s2io_entries[i].entry = i;
3186 nic->s2io_entries[i].arg = NULL;
3187 nic->s2io_entries[i].in_use = 0;
3188 }
3189
3190 tx_mat = readq(&bar0->tx_mat0_n[0]);
3191 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3192 tx_mat |= TX_MAT_SET(i, msix_indx);
3193 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3194 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3195 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3196 }
3197 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3198
3199 if (!nic->config.bimodal) {
3200 rx_mat = readq(&bar0->rx_mat);
3201 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3202 rx_mat |= RX_MAT_SET(j, msix_indx);
3203 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3204 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3205 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3206 }
3207 writeq(rx_mat, &bar0->rx_mat);
3208 } else {
3209 tx_mat = readq(&bar0->tx_mat0_n[7]);
3210 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3211 tx_mat |= TX_MAT_SET(i, msix_indx);
3212 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3213 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3214 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3215 }
3216 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3217 }
3218
3219 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3220 if (ret) {
3221 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3222 kfree(nic->entries);
3223 kfree(nic->s2io_entries);
3224 nic->entries = NULL;
3225 nic->s2io_entries = NULL;
3226 return -ENOMEM;
3227 }
3228
3229 /*
3230 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3231 * in the herc NIC. (Temp change, needs to be removed later)
3232 */
3233 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3234 msi_control |= 0x1; /* Enable MSI */
3235 pci_write_config_word(nic->pdev, 0x42, msi_control);
3236
3237 return 0;
3238}
3239
3031/* ********************************************************* * 3240/* ********************************************************* *
3032 * Functions defined below concern the OS part of the driver * 3241 * Functions defined below concern the OS part of the driver *
3033 * ********************************************************* */ 3242 * ********************************************************* */
@@ -3048,6 +3257,8 @@ int s2io_open(struct net_device *dev)
3048{ 3257{
3049 nic_t *sp = dev->priv; 3258 nic_t *sp = dev->priv;
3050 int err = 0; 3259 int err = 0;
3260 int i;
3261 u16 msi_control; /* Temp variable */
3051 3262
3052 /* 3263 /*
3053 * Make sure you have link off by default every time 3264 * Make sure you have link off by default every time
@@ -3064,13 +3275,55 @@ int s2io_open(struct net_device *dev)
3064 goto hw_init_failed; 3275 goto hw_init_failed;
3065 } 3276 }
3066 3277
3278 /* Store the values of the MSIX table in the nic_t structure */
3279 store_xmsi_data(sp);
3280
3067 /* After proper initialization of H/W, register ISR */ 3281 /* After proper initialization of H/W, register ISR */
3068 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ, 3282 if (sp->intr_type == MSI) {
3069 sp->name, dev); 3283 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
3070 if (err) { 3284 SA_SHIRQ, sp->name, dev);
3071 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", 3285 if (err) {
3072 dev->name); 3286 DBG_PRINT(ERR_DBG, "%s: MSI registration \
3073 goto isr_registration_failed; 3287failed\n", dev->name);
3288 goto isr_registration_failed;
3289 }
3290 }
3291 if (sp->intr_type == MSI_X) {
3292 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
3293 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
3294 sprintf(sp->desc1, "%s:MSI-X-%d-TX",
3295 dev->name, i);
3296 err = request_irq(sp->entries[i].vector,
3297 s2io_msix_fifo_handle, 0, sp->desc1,
3298 sp->s2io_entries[i].arg);
3299 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc1,
3300 sp->msix_info[i].addr);
3301 } else {
3302 sprintf(sp->desc2, "%s:MSI-X-%d-RX",
3303 dev->name, i);
3304 err = request_irq(sp->entries[i].vector,
3305 s2io_msix_ring_handle, 0, sp->desc2,
3306 sp->s2io_entries[i].arg);
3307 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc2,
3308 sp->msix_info[i].addr);
3309 }
3310 if (err) {
3311 DBG_PRINT(ERR_DBG, "%s: MSI-X-%d registration \
3312failed\n", dev->name, i);
3313 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
3314 goto isr_registration_failed;
3315 }
3316 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
3317 }
3318 }
3319 if (sp->intr_type == INTA) {
3320 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
3321 sp->name, dev);
3322 if (err) {
3323 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
3324 dev->name);
3325 goto isr_registration_failed;
3326 }
3074 } 3327 }
3075 3328
3076 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) { 3329 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
@@ -3083,11 +3336,37 @@ int s2io_open(struct net_device *dev)
3083 return 0; 3336 return 0;
3084 3337
3085setting_mac_address_failed: 3338setting_mac_address_failed:
3086 free_irq(sp->pdev->irq, dev); 3339 if (sp->intr_type != MSI_X)
3340 free_irq(sp->pdev->irq, dev);
3087isr_registration_failed: 3341isr_registration_failed:
3088 del_timer_sync(&sp->alarm_timer); 3342 del_timer_sync(&sp->alarm_timer);
3343 if (sp->intr_type == MSI_X) {
3344 if (sp->device_type == XFRAME_II_DEVICE) {
3345 for (i=1; (sp->s2io_entries[i].in_use ==
3346 MSIX_REGISTERED_SUCCESS); i++) {
3347 int vector = sp->entries[i].vector;
3348 void *arg = sp->s2io_entries[i].arg;
3349
3350 free_irq(vector, arg);
3351 }
3352 pci_disable_msix(sp->pdev);
3353
3354 /* Temp */
3355 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3356 msi_control &= 0xFFFE; /* Disable MSI */
3357 pci_write_config_word(sp->pdev, 0x42, msi_control);
3358 }
3359 }
3360 else if (sp->intr_type == MSI)
3361 pci_disable_msi(sp->pdev);
3089 s2io_reset(sp); 3362 s2io_reset(sp);
3090hw_init_failed: 3363hw_init_failed:
3364 if (sp->intr_type == MSI_X) {
3365 if (sp->entries)
3366 kfree(sp->entries);
3367 if (sp->s2io_entries)
3368 kfree(sp->s2io_entries);
3369 }
3091 return err; 3370 return err;
3092} 3371}
3093 3372
@@ -3107,12 +3386,35 @@ hw_init_failed:
3107int s2io_close(struct net_device *dev) 3386int s2io_close(struct net_device *dev)
3108{ 3387{
3109 nic_t *sp = dev->priv; 3388 nic_t *sp = dev->priv;
3389 int i;
3390 u16 msi_control;
3391
3110 flush_scheduled_work(); 3392 flush_scheduled_work();
3111 netif_stop_queue(dev); 3393 netif_stop_queue(dev);
3112 /* Reset card, kill tasklet and free Tx and Rx buffers. */ 3394 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3113 s2io_card_down(sp); 3395 s2io_card_down(sp);
3114 3396
3115 free_irq(sp->pdev->irq, dev); 3397 if (sp->intr_type == MSI_X) {
3398 if (sp->device_type == XFRAME_II_DEVICE) {
3399 for (i=1; (sp->s2io_entries[i].in_use ==
3400 MSIX_REGISTERED_SUCCESS); i++) {
3401 int vector = sp->entries[i].vector;
3402 void *arg = sp->s2io_entries[i].arg;
3403
3404 free_irq(vector, arg);
3405 }
3406 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3407 msi_control &= 0xFFFE; /* Disable MSI */
3408 pci_write_config_word(sp->pdev, 0x42, msi_control);
3409
3410 pci_disable_msix(sp->pdev);
3411 }
3412 }
3413 else {
3414 free_irq(sp->pdev->irq, dev);
3415 if (sp->intr_type == MSI)
3416 pci_disable_msi(sp->pdev);
3417 }
3116 sp->device_close_flag = TRUE; /* Device is shut down. */ 3418 sp->device_close_flag = TRUE; /* Device is shut down. */
3117 return 0; 3419 return 0;
3118} 3420}
@@ -3278,6 +3580,104 @@ s2io_alarm_handle(unsigned long data)
3278 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 3580 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3279} 3581}
3280 3582
3583static irqreturn_t
3584s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
3585{
3586 struct net_device *dev = (struct net_device *) dev_id;
3587 nic_t *sp = dev->priv;
3588 int i;
3589 int ret;
3590 mac_info_t *mac_control;
3591 struct config_param *config;
3592
3593 atomic_inc(&sp->isr_cnt);
3594 mac_control = &sp->mac_control;
3595 config = &sp->config;
3596 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
3597
3598 /* If Intr is because of Rx Traffic */
3599 for (i = 0; i < config->rx_ring_num; i++)
3600 rx_intr_handler(&mac_control->rings[i]);
3601
3602 /* If Intr is because of Tx Traffic */
3603 for (i = 0; i < config->tx_fifo_num; i++)
3604 tx_intr_handler(&mac_control->fifos[i]);
3605
3606 /*
3607 * If the Rx buffer count is below the panic threshold then
3608 * reallocate the buffers from the interrupt handler itself,
3609 * else schedule a tasklet to reallocate the buffers.
3610 */
3611 for (i = 0; i < config->rx_ring_num; i++) {
3612 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3613 int level = rx_buffer_level(sp, rxb_size, i);
3614
3615 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3616 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3617 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3618 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3619 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3620 dev->name);
3621 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3622 clear_bit(0, (&sp->tasklet_status));
3623 atomic_dec(&sp->isr_cnt);
3624 return IRQ_HANDLED;
3625 }
3626 clear_bit(0, (&sp->tasklet_status));
3627 } else if (level == LOW) {
3628 tasklet_schedule(&sp->task);
3629 }
3630 }
3631
3632 atomic_dec(&sp->isr_cnt);
3633 return IRQ_HANDLED;
3634}
3635
3636static irqreturn_t
3637s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
3638{
3639 ring_info_t *ring = (ring_info_t *)dev_id;
3640 nic_t *sp = ring->nic;
3641 int rxb_size, level, rng_n;
3642
3643 atomic_inc(&sp->isr_cnt);
3644 rx_intr_handler(ring);
3645
3646 rng_n = ring->ring_no;
3647 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3648 level = rx_buffer_level(sp, rxb_size, rng_n);
3649
3650 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3651 int ret;
3652 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3653 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3654 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3655 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3656 __FUNCTION__);
3657 clear_bit(0, (&sp->tasklet_status));
3658 return IRQ_HANDLED;
3659 }
3660 clear_bit(0, (&sp->tasklet_status));
3661 } else if (level == LOW) {
3662 tasklet_schedule(&sp->task);
3663 }
3664 atomic_dec(&sp->isr_cnt);
3665
3666 return IRQ_HANDLED;
3667}
3668
3669static irqreturn_t
3670s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs)
3671{
3672 fifo_info_t *fifo = (fifo_info_t *)dev_id;
3673 nic_t *sp = fifo->nic;
3674
3675 atomic_inc(&sp->isr_cnt);
3676 tx_intr_handler(fifo);
3677 atomic_dec(&sp->isr_cnt);
3678 return IRQ_HANDLED;
3679}
3680
3281static void s2io_txpic_intr_handle(nic_t *sp) 3681static void s2io_txpic_intr_handle(nic_t *sp)
3282{ 3682{
3283 XENA_dev_config_t __iomem *bar0 = sp->bar0; 3683 XENA_dev_config_t __iomem *bar0 = sp->bar0;
@@ -3778,11 +4178,10 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3778{ 4178{
3779 nic_t *sp = dev->priv; 4179 nic_t *sp = dev->priv;
3780 4180
3781 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name)); 4181 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
3782 strncpy(info->version, s2io_driver_version, 4182 strncpy(info->version, s2io_driver_version, sizeof(info->version));
3783 sizeof(s2io_driver_version)); 4183 strncpy(info->fw_version, "", sizeof(info->fw_version));
3784 strncpy(info->fw_version, "", 32); 4184 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
3785 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3786 info->regdump_len = XENA_REG_SPACE; 4185 info->regdump_len = XENA_REG_SPACE;
3787 info->eedump_len = XENA_EEPROM_SPACE; 4186 info->eedump_len = XENA_EEPROM_SPACE;
3788 info->testinfo_len = S2IO_TEST_LEN; 4187 info->testinfo_len = S2IO_TEST_LEN;
@@ -3978,29 +4377,53 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
3978 */ 4377 */
3979 4378
3980#define S2IO_DEV_ID 5 4379#define S2IO_DEV_ID 5
3981static int read_eeprom(nic_t * sp, int off, u32 * data) 4380static int read_eeprom(nic_t * sp, int off, u64 * data)
3982{ 4381{
3983 int ret = -1; 4382 int ret = -1;
3984 u32 exit_cnt = 0; 4383 u32 exit_cnt = 0;
3985 u64 val64; 4384 u64 val64;
3986 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4385 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3987 4386
3988 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | 4387 if (sp->device_type == XFRAME_I_DEVICE) {
3989 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ | 4388 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3990 I2C_CONTROL_CNTL_START; 4389 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3991 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); 4390 I2C_CONTROL_CNTL_START;
4391 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3992 4392
3993 while (exit_cnt < 5) { 4393 while (exit_cnt < 5) {
3994 val64 = readq(&bar0->i2c_control); 4394 val64 = readq(&bar0->i2c_control);
3995 if (I2C_CONTROL_CNTL_END(val64)) { 4395 if (I2C_CONTROL_CNTL_END(val64)) {
3996 *data = I2C_CONTROL_GET_DATA(val64); 4396 *data = I2C_CONTROL_GET_DATA(val64);
3997 ret = 0; 4397 ret = 0;
3998 break; 4398 break;
4399 }
4400 msleep(50);
4401 exit_cnt++;
3999 } 4402 }
4000 msleep(50);
4001 exit_cnt++;
4002 } 4403 }
4003 4404
4405 if (sp->device_type == XFRAME_II_DEVICE) {
4406 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4407 SPI_CONTROL_BYTECNT(0x3) |
4408 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4409 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4410 val64 |= SPI_CONTROL_REQ;
4411 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4412 while (exit_cnt < 5) {
4413 val64 = readq(&bar0->spi_control);
4414 if (val64 & SPI_CONTROL_NACK) {
4415 ret = 1;
4416 break;
4417 } else if (val64 & SPI_CONTROL_DONE) {
4418 *data = readq(&bar0->spi_data);
4419 *data &= 0xffffff;
4420 ret = 0;
4421 break;
4422 }
4423 msleep(50);
4424 exit_cnt++;
4425 }
4426 }
4004 return ret; 4427 return ret;
4005} 4428}
4006 4429
@@ -4019,28 +4442,53 @@ static int read_eeprom(nic_t * sp, int off, u32 * data)
4019 * 0 on success, -1 on failure. 4442 * 0 on success, -1 on failure.
4020 */ 4443 */
4021 4444
4022static int write_eeprom(nic_t * sp, int off, u32 data, int cnt) 4445static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4023{ 4446{
4024 int exit_cnt = 0, ret = -1; 4447 int exit_cnt = 0, ret = -1;
4025 u64 val64; 4448 u64 val64;
4026 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4449 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4027 4450
4028 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) | 4451 if (sp->device_type == XFRAME_I_DEVICE) {
4029 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) | 4452 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4030 I2C_CONTROL_CNTL_START; 4453 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4031 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF); 4454 I2C_CONTROL_CNTL_START;
4455 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4456
4457 while (exit_cnt < 5) {
4458 val64 = readq(&bar0->i2c_control);
4459 if (I2C_CONTROL_CNTL_END(val64)) {
4460 if (!(val64 & I2C_CONTROL_NACK))
4461 ret = 0;
4462 break;
4463 }
4464 msleep(50);
4465 exit_cnt++;
4466 }
4467 }
4032 4468
4033 while (exit_cnt < 5) { 4469 if (sp->device_type == XFRAME_II_DEVICE) {
4034 val64 = readq(&bar0->i2c_control); 4470 int write_cnt = (cnt == 8) ? 0 : cnt;
4035 if (I2C_CONTROL_CNTL_END(val64)) { 4471 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4036 if (!(val64 & I2C_CONTROL_NACK)) 4472
4473 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4474 SPI_CONTROL_BYTECNT(write_cnt) |
4475 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4476 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4477 val64 |= SPI_CONTROL_REQ;
4478 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4479 while (exit_cnt < 5) {
4480 val64 = readq(&bar0->spi_control);
4481 if (val64 & SPI_CONTROL_NACK) {
4482 ret = 1;
4483 break;
4484 } else if (val64 & SPI_CONTROL_DONE) {
4037 ret = 0; 4485 ret = 0;
4038 break; 4486 break;
4487 }
4488 msleep(50);
4489 exit_cnt++;
4039 } 4490 }
4040 msleep(50);
4041 exit_cnt++;
4042 } 4491 }
4043
4044 return ret; 4492 return ret;
4045} 4493}
4046 4494
@@ -4060,7 +4508,8 @@ static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
4060static int s2io_ethtool_geeprom(struct net_device *dev, 4508static int s2io_ethtool_geeprom(struct net_device *dev,
4061 struct ethtool_eeprom *eeprom, u8 * data_buf) 4509 struct ethtool_eeprom *eeprom, u8 * data_buf)
4062{ 4510{
4063 u32 data, i, valid; 4511 u32 i, valid;
4512 u64 data;
4064 nic_t *sp = dev->priv; 4513 nic_t *sp = dev->priv;
4065 4514
4066 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16); 4515 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
@@ -4098,7 +4547,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
4098 u8 * data_buf) 4547 u8 * data_buf)
4099{ 4548{
4100 int len = eeprom->len, cnt = 0; 4549 int len = eeprom->len, cnt = 0;
4101 u32 valid = 0, data; 4550 u64 valid = 0, data;
4102 nic_t *sp = dev->priv; 4551 nic_t *sp = dev->priv;
4103 4552
4104 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) { 4553 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
@@ -4146,7 +4595,7 @@ static int s2io_ethtool_seeprom(struct net_device *dev,
4146static int s2io_register_test(nic_t * sp, uint64_t * data) 4595static int s2io_register_test(nic_t * sp, uint64_t * data)
4147{ 4596{
4148 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4597 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4149 u64 val64 = 0; 4598 u64 val64 = 0, exp_val;
4150 int fail = 0; 4599 int fail = 0;
4151 4600
4152 val64 = readq(&bar0->pif_rd_swapper_fb); 4601 val64 = readq(&bar0->pif_rd_swapper_fb);
@@ -4162,7 +4611,11 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
4162 } 4611 }
4163 4612
4164 val64 = readq(&bar0->rx_queue_cfg); 4613 val64 = readq(&bar0->rx_queue_cfg);
4165 if (val64 != 0x0808080808080808ULL) { 4614 if (sp->device_type == XFRAME_II_DEVICE)
4615 exp_val = 0x0404040404040404ULL;
4616 else
4617 exp_val = 0x0808080808080808ULL;
4618 if (val64 != exp_val) {
4166 fail = 1; 4619 fail = 1;
4167 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n"); 4620 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
4168 } 4621 }
@@ -4190,7 +4643,7 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
4190 } 4643 }
4191 4644
4192 *data = fail; 4645 *data = fail;
4193 return 0; 4646 return fail;
4194} 4647}
4195 4648
4196/** 4649/**
@@ -4209,58 +4662,83 @@ static int s2io_register_test(nic_t * sp, uint64_t * data)
4209static int s2io_eeprom_test(nic_t * sp, uint64_t * data) 4662static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4210{ 4663{
4211 int fail = 0; 4664 int fail = 0;
4212 u32 ret_data; 4665 u64 ret_data, org_4F0, org_7F0;
4666 u8 saved_4F0 = 0, saved_7F0 = 0;
4667 struct net_device *dev = sp->dev;
4213 4668
4214 /* Test Write Error at offset 0 */ 4669 /* Test Write Error at offset 0 */
4215 if (!write_eeprom(sp, 0, 0, 3)) 4670 /* Note that SPI interface allows write access to all areas
4216 fail = 1; 4671 * of EEPROM. Hence doing all negative testing only for Xframe I.
4672 */
4673 if (sp->device_type == XFRAME_I_DEVICE)
4674 if (!write_eeprom(sp, 0, 0, 3))
4675 fail = 1;
4676
4677 /* Save current values at offsets 0x4F0 and 0x7F0 */
4678 if (!read_eeprom(sp, 0x4F0, &org_4F0))
4679 saved_4F0 = 1;
4680 if (!read_eeprom(sp, 0x7F0, &org_7F0))
4681 saved_7F0 = 1;
4217 4682
4218 /* Test Write at offset 4f0 */ 4683 /* Test Write at offset 4f0 */
4219 if (write_eeprom(sp, 0x4F0, 0x01234567, 3)) 4684 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
4220 fail = 1; 4685 fail = 1;
4221 if (read_eeprom(sp, 0x4F0, &ret_data)) 4686 if (read_eeprom(sp, 0x4F0, &ret_data))
4222 fail = 1; 4687 fail = 1;
4223 4688
4224 if (ret_data != 0x01234567) 4689 if (ret_data != 0x012345) {
4690 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data);
4225 fail = 1; 4691 fail = 1;
4692 }
4226 4693
4227 /* Reset the EEPROM data go FFFF */ 4694 /* Reset the EEPROM data go FFFF */
4228 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3); 4695 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
4229 4696
4230 /* Test Write Request Error at offset 0x7c */ 4697 /* Test Write Request Error at offset 0x7c */
4231 if (!write_eeprom(sp, 0x07C, 0, 3)) 4698 if (sp->device_type == XFRAME_I_DEVICE)
4232 fail = 1; 4699 if (!write_eeprom(sp, 0x07C, 0, 3))
4700 fail = 1;
4233 4701
4234 /* Test Write Request at offset 0x7fc */ 4702 /* Test Write Request at offset 0x7f0 */
4235 if (write_eeprom(sp, 0x7FC, 0x01234567, 3)) 4703 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
4236 fail = 1; 4704 fail = 1;
4237 if (read_eeprom(sp, 0x7FC, &ret_data)) 4705 if (read_eeprom(sp, 0x7F0, &ret_data))
4238 fail = 1; 4706 fail = 1;
4239 4707
4240 if (ret_data != 0x01234567) 4708 if (ret_data != 0x012345) {
4709 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. Data written %llx Data read %llx\n", dev->name, (u64)0x12345, ret_data);
4241 fail = 1; 4710 fail = 1;
4711 }
4242 4712
4243 /* Reset the EEPROM data go FFFF */ 4713 /* Reset the EEPROM data go FFFF */
4244 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3); 4714 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
4245 4715
4246 /* Test Write Error at offset 0x80 */ 4716 if (sp->device_type == XFRAME_I_DEVICE) {
4247 if (!write_eeprom(sp, 0x080, 0, 3)) 4717 /* Test Write Error at offset 0x80 */
4248 fail = 1; 4718 if (!write_eeprom(sp, 0x080, 0, 3))
4719 fail = 1;
4249 4720
4250 /* Test Write Error at offset 0xfc */ 4721 /* Test Write Error at offset 0xfc */
4251 if (!write_eeprom(sp, 0x0FC, 0, 3)) 4722 if (!write_eeprom(sp, 0x0FC, 0, 3))
4252 fail = 1; 4723 fail = 1;
4253 4724
4254 /* Test Write Error at offset 0x100 */ 4725 /* Test Write Error at offset 0x100 */
4255 if (!write_eeprom(sp, 0x100, 0, 3)) 4726 if (!write_eeprom(sp, 0x100, 0, 3))
4256 fail = 1; 4727 fail = 1;
4257 4728
4258 /* Test Write Error at offset 4ec */ 4729 /* Test Write Error at offset 4ec */
4259 if (!write_eeprom(sp, 0x4EC, 0, 3)) 4730 if (!write_eeprom(sp, 0x4EC, 0, 3))
4260 fail = 1; 4731 fail = 1;
4732 }
4733
4734 /* Restore values at offsets 0x4F0 and 0x7F0 */
4735 if (saved_4F0)
4736 write_eeprom(sp, 0x4F0, org_4F0, 3);
4737 if (saved_7F0)
4738 write_eeprom(sp, 0x7F0, org_7F0, 3);
4261 4739
4262 *data = fail; 4740 *data = fail;
4263 return 0; 4741 return fail;
4264} 4742}
4265 4743
4266/** 4744/**
@@ -4342,7 +4820,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4342{ 4820{
4343 XENA_dev_config_t __iomem *bar0 = sp->bar0; 4821 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4344 u64 val64; 4822 u64 val64;
4345 int cnt, iteration = 0, test_pass = 0; 4823 int cnt, iteration = 0, test_fail = 0;
4346 4824
4347 val64 = readq(&bar0->adapter_control); 4825 val64 = readq(&bar0->adapter_control);
4348 val64 &= ~ADAPTER_ECC_EN; 4826 val64 &= ~ADAPTER_ECC_EN;
@@ -4350,7 +4828,7 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4350 4828
4351 val64 = readq(&bar0->mc_rldram_test_ctrl); 4829 val64 = readq(&bar0->mc_rldram_test_ctrl);
4352 val64 |= MC_RLDRAM_TEST_MODE; 4830 val64 |= MC_RLDRAM_TEST_MODE;
4353 writeq(val64, &bar0->mc_rldram_test_ctrl); 4831 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4354 4832
4355 val64 = readq(&bar0->mc_rldram_mrs); 4833 val64 = readq(&bar0->mc_rldram_mrs);
4356 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE; 4834 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
@@ -4378,17 +4856,12 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4378 } 4856 }
4379 writeq(val64, &bar0->mc_rldram_test_d2); 4857 writeq(val64, &bar0->mc_rldram_test_d2);
4380 4858
4381 val64 = (u64) (0x0000003fffff0000ULL); 4859 val64 = (u64) (0x0000003ffffe0100ULL);
4382 writeq(val64, &bar0->mc_rldram_test_add); 4860 writeq(val64, &bar0->mc_rldram_test_add);
4383 4861
4384 4862 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4385 val64 = MC_RLDRAM_TEST_MODE; 4863 MC_RLDRAM_TEST_GO;
4386 writeq(val64, &bar0->mc_rldram_test_ctrl); 4864 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4387
4388 val64 |=
4389 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4390 MC_RLDRAM_TEST_GO;
4391 writeq(val64, &bar0->mc_rldram_test_ctrl);
4392 4865
4393 for (cnt = 0; cnt < 5; cnt++) { 4866 for (cnt = 0; cnt < 5; cnt++) {
4394 val64 = readq(&bar0->mc_rldram_test_ctrl); 4867 val64 = readq(&bar0->mc_rldram_test_ctrl);
@@ -4400,11 +4873,8 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4400 if (cnt == 5) 4873 if (cnt == 5)
4401 break; 4874 break;
4402 4875
4403 val64 = MC_RLDRAM_TEST_MODE; 4876 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4404 writeq(val64, &bar0->mc_rldram_test_ctrl); 4877 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
4405
4406 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4407 writeq(val64, &bar0->mc_rldram_test_ctrl);
4408 4878
4409 for (cnt = 0; cnt < 5; cnt++) { 4879 for (cnt = 0; cnt < 5; cnt++) {
4410 val64 = readq(&bar0->mc_rldram_test_ctrl); 4880 val64 = readq(&bar0->mc_rldram_test_ctrl);
@@ -4417,18 +4887,18 @@ static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4417 break; 4887 break;
4418 4888
4419 val64 = readq(&bar0->mc_rldram_test_ctrl); 4889 val64 = readq(&bar0->mc_rldram_test_ctrl);
4420 if (val64 & MC_RLDRAM_TEST_PASS) 4890 if (!(val64 & MC_RLDRAM_TEST_PASS))
4421 test_pass = 1; 4891 test_fail = 1;
4422 4892
4423 iteration++; 4893 iteration++;
4424 } 4894 }
4425 4895
4426 if (!test_pass) 4896 *data = test_fail;
4427 *data = 1;
4428 else
4429 *data = 0;
4430 4897
4431 return 0; 4898 /* Bring the adapter out of test mode */
4899 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
4900
4901 return test_fail;
4432} 4902}
4433 4903
4434/** 4904/**
@@ -4932,7 +5402,7 @@ static void s2io_card_down(nic_t * sp)
4932 5402
4933static int s2io_card_up(nic_t * sp) 5403static int s2io_card_up(nic_t * sp)
4934{ 5404{
4935 int i, ret; 5405 int i, ret = 0;
4936 mac_info_t *mac_control; 5406 mac_info_t *mac_control;
4937 struct config_param *config; 5407 struct config_param *config;
4938 struct net_device *dev = (struct net_device *) sp->dev; 5408 struct net_device *dev = (struct net_device *) sp->dev;
@@ -4944,6 +5414,15 @@ static int s2io_card_up(nic_t * sp)
4944 return -ENODEV; 5414 return -ENODEV;
4945 } 5415 }
4946 5416
5417 if (sp->intr_type == MSI)
5418 ret = s2io_enable_msi(sp);
5419 else if (sp->intr_type == MSI_X)
5420 ret = s2io_enable_msi_x(sp);
5421 if (ret) {
5422 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
5423 sp->intr_type = INTA;
5424 }
5425
4947 /* 5426 /*
4948 * Initializing the Rx buffers. For now we are considering only 1 5427 * Initializing the Rx buffers. For now we are considering only 1
4949 * Rx ring and initializing buffers into 30 Rx blocks 5428 * Rx ring and initializing buffers into 30 Rx blocks
@@ -5228,6 +5707,8 @@ static void s2io_init_pci(nic_t * sp)
5228 5707
5229MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>"); 5708MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5230MODULE_LICENSE("GPL"); 5709MODULE_LICENSE("GPL");
5710MODULE_VERSION(DRV_VERSION);
5711
5231module_param(tx_fifo_num, int, 0); 5712module_param(tx_fifo_num, int, 0);
5232module_param(rx_ring_num, int, 0); 5713module_param(rx_ring_num, int, 0);
5233module_param_array(tx_fifo_len, uint, NULL, 0); 5714module_param_array(tx_fifo_len, uint, NULL, 0);
@@ -5245,6 +5726,7 @@ module_param(bimodal, bool, 0);
5245module_param(indicate_max_pkts, int, 0); 5726module_param(indicate_max_pkts, int, 0);
5246#endif 5727#endif
5247module_param(rxsync_frequency, int, 0); 5728module_param(rxsync_frequency, int, 0);
5729module_param(intr_type, int, 0);
5248 5730
5249/** 5731/**
5250 * s2io_init_nic - Initialization of the adapter . 5732 * s2io_init_nic - Initialization of the adapter .
@@ -5274,9 +5756,16 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5274 mac_info_t *mac_control; 5756 mac_info_t *mac_control;
5275 struct config_param *config; 5757 struct config_param *config;
5276 int mode; 5758 int mode;
5759 u8 dev_intr_type = intr_type;
5277 5760
5278#ifdef CONFIG_S2IO_NAPI 5761#ifdef CONFIG_S2IO_NAPI
5279 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n"); 5762 if (dev_intr_type != INTA) {
5763 DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
5764is enabled. Defaulting to INTA\n");
5765 dev_intr_type = INTA;
5766 }
5767 else
5768 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5280#endif 5769#endif
5281 5770
5282 if ((ret = pci_enable_device(pdev))) { 5771 if ((ret = pci_enable_device(pdev))) {
@@ -5303,10 +5792,35 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5303 return -ENOMEM; 5792 return -ENOMEM;
5304 } 5793 }
5305 5794
5306 if (pci_request_regions(pdev, s2io_driver_name)) { 5795 if ((dev_intr_type == MSI_X) &&
5307 DBG_PRINT(ERR_DBG, "Request Regions failed\n"), 5796 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
5308 pci_disable_device(pdev); 5797 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
5309 return -ENODEV; 5798 DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
5799Defaulting to INTA\n");
5800 dev_intr_type = INTA;
5801 }
5802 if (dev_intr_type != MSI_X) {
5803 if (pci_request_regions(pdev, s2io_driver_name)) {
5804 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
5805 pci_disable_device(pdev);
5806 return -ENODEV;
5807 }
5808 }
5809 else {
5810 if (!(request_mem_region(pci_resource_start(pdev, 0),
5811 pci_resource_len(pdev, 0), s2io_driver_name))) {
5812 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
5813 pci_disable_device(pdev);
5814 return -ENODEV;
5815 }
5816 if (!(request_mem_region(pci_resource_start(pdev, 2),
5817 pci_resource_len(pdev, 2), s2io_driver_name))) {
5818 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
5819 release_mem_region(pci_resource_start(pdev, 0),
5820 pci_resource_len(pdev, 0));
5821 pci_disable_device(pdev);
5822 return -ENODEV;
5823 }
5310 } 5824 }
5311 5825
5312 dev = alloc_etherdev(sizeof(nic_t)); 5826 dev = alloc_etherdev(sizeof(nic_t));
@@ -5329,6 +5843,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5329 sp->pdev = pdev; 5843 sp->pdev = pdev;
5330 sp->high_dma_flag = dma_flag; 5844 sp->high_dma_flag = dma_flag;
5331 sp->device_enabled_once = FALSE; 5845 sp->device_enabled_once = FALSE;
5846 sp->intr_type = dev_intr_type;
5332 5847
5333 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || 5848 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5334 (pdev->device == PCI_DEVICE_ID_HERC_UNI)) 5849 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
@@ -5336,6 +5851,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5336 else 5851 else
5337 sp->device_type = XFRAME_I_DEVICE; 5852 sp->device_type = XFRAME_I_DEVICE;
5338 5853
5854
5339 /* Initialize some PCI/PCI-X fields of the NIC. */ 5855 /* Initialize some PCI/PCI-X fields of the NIC. */
5340 s2io_init_pci(sp); 5856 s2io_init_pci(sp);
5341 5857
@@ -5571,12 +6087,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5571 if (sp->device_type & XFRAME_II_DEVICE) { 6087 if (sp->device_type & XFRAME_II_DEVICE) {
5572 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ", 6088 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5573 dev->name); 6089 dev->name);
5574 DBG_PRINT(ERR_DBG, "(rev %d), %s", 6090 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
5575 get_xena_rev_id(sp->pdev), 6091 get_xena_rev_id(sp->pdev),
5576 s2io_driver_version); 6092 s2io_driver_version);
5577#ifdef CONFIG_2BUFF_MODE 6093#ifdef CONFIG_2BUFF_MODE
5578 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2); 6094 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
5579#endif 6095#endif
6096 switch(sp->intr_type) {
6097 case INTA:
6098 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6099 break;
6100 case MSI:
6101 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6102 break;
6103 case MSI_X:
6104 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6105 break;
6106 }
5580 6107
5581 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n"); 6108 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5582 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", 6109 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -5595,12 +6122,23 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5595 } else { 6122 } else {
5596 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ", 6123 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5597 dev->name); 6124 dev->name);
5598 DBG_PRINT(ERR_DBG, "(rev %d), %s", 6125 DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
5599 get_xena_rev_id(sp->pdev), 6126 get_xena_rev_id(sp->pdev),
5600 s2io_driver_version); 6127 s2io_driver_version);
5601#ifdef CONFIG_2BUFF_MODE 6128#ifdef CONFIG_2BUFF_MODE
5602 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2); 6129 DBG_PRINT(ERR_DBG, ", Buffer mode %d",2);
5603#endif 6130#endif
6131 switch(sp->intr_type) {
6132 case INTA:
6133 DBG_PRINT(ERR_DBG, ", Intr type INTA");
6134 break;
6135 case MSI:
6136 DBG_PRINT(ERR_DBG, ", Intr type MSI");
6137 break;
6138 case MSI_X:
6139 DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
6140 break;
6141 }
5604 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n"); 6142 DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
5605 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n", 6143 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5606 sp->def_mac_addr[0].mac_addr[0], 6144 sp->def_mac_addr[0].mac_addr[0],
@@ -5644,7 +6182,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5644 mem_alloc_failed: 6182 mem_alloc_failed:
5645 free_shared_mem(sp); 6183 free_shared_mem(sp);
5646 pci_disable_device(pdev); 6184 pci_disable_device(pdev);
5647 pci_release_regions(pdev); 6185 if (dev_intr_type != MSI_X)
6186 pci_release_regions(pdev);
6187 else {
6188 release_mem_region(pci_resource_start(pdev, 0),
6189 pci_resource_len(pdev, 0));
6190 release_mem_region(pci_resource_start(pdev, 2),
6191 pci_resource_len(pdev, 2));
6192 }
5648 pci_set_drvdata(pdev, NULL); 6193 pci_set_drvdata(pdev, NULL);
5649 free_netdev(dev); 6194 free_netdev(dev);
5650 6195
@@ -5678,7 +6223,14 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5678 iounmap(sp->bar0); 6223 iounmap(sp->bar0);
5679 iounmap(sp->bar1); 6224 iounmap(sp->bar1);
5680 pci_disable_device(pdev); 6225 pci_disable_device(pdev);
5681 pci_release_regions(pdev); 6226 if (sp->intr_type != MSI_X)
6227 pci_release_regions(pdev);
6228 else {
6229 release_mem_region(pci_resource_start(pdev, 0),
6230 pci_resource_len(pdev, 0));
6231 release_mem_region(pci_resource_start(pdev, 2),
6232 pci_resource_len(pdev, 2));
6233 }
5682 pci_set_drvdata(pdev, NULL); 6234 pci_set_drvdata(pdev, NULL);
5683 free_netdev(dev); 6235 free_netdev(dev);
5684} 6236}
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 89151cb52181..1cc24b56760e 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -652,6 +652,30 @@ typedef struct {
652#define SMALL_BLK_CNT 30 652#define SMALL_BLK_CNT 30
653#define LARGE_BLK_CNT 100 653#define LARGE_BLK_CNT 100
654 654
655/*
656 * Structure to keep track of the MSI-X vectors and the corresponding
657 * argument registered against each vector
658 */
659#define MAX_REQUESTED_MSI_X 17
660struct s2io_msix_entry
661{
662 u16 vector;
663 u16 entry;
664 void *arg;
665
666 u8 type;
667#define MSIX_FIFO_TYPE 1
668#define MSIX_RING_TYPE 2
669
670 u8 in_use;
671#define MSIX_REGISTERED_SUCCESS 0xAA
672};
673
674struct msix_info_st {
675 u64 addr;
676 u64 data;
677};
678
655/* Structure representing one instance of the NIC */ 679/* Structure representing one instance of the NIC */
656struct s2io_nic { 680struct s2io_nic {
657#ifdef CONFIG_S2IO_NAPI 681#ifdef CONFIG_S2IO_NAPI
@@ -719,13 +743,8 @@ struct s2io_nic {
719 * a schedule task that will set the correct Link state once the 743 * a schedule task that will set the correct Link state once the
720 * NIC's PHY has stabilized after a state change. 744 * NIC's PHY has stabilized after a state change.
721 */ 745 */
722#ifdef INIT_TQUEUE
723 struct tq_struct rst_timer_task;
724 struct tq_struct set_link_task;
725#else
726 struct work_struct rst_timer_task; 746 struct work_struct rst_timer_task;
727 struct work_struct set_link_task; 747 struct work_struct set_link_task;
728#endif
729 748
730 /* Flag that can be used to turn on or turn off the Rx checksum 749 /* Flag that can be used to turn on or turn off the Rx checksum
731 * offload feature. 750 * offload feature.
@@ -748,10 +767,23 @@ struct s2io_nic {
748 atomic_t card_state; 767 atomic_t card_state;
749 volatile unsigned long link_state; 768 volatile unsigned long link_state;
750 struct vlan_group *vlgrp; 769 struct vlan_group *vlgrp;
770#define MSIX_FLG 0xA5
771 struct msix_entry *entries;
772 struct s2io_msix_entry *s2io_entries;
773 char desc1[35];
774 char desc2[35];
775
776 struct msix_info_st msix_info[0x3f];
777
751#define XFRAME_I_DEVICE 1 778#define XFRAME_I_DEVICE 1
752#define XFRAME_II_DEVICE 2 779#define XFRAME_II_DEVICE 2
753 u8 device_type; 780 u8 device_type;
754 781
782#define INTA 0
783#define MSI 1
784#define MSI_X 2
785 u8 intr_type;
786
755 spinlock_t rx_lock; 787 spinlock_t rx_lock;
756 atomic_t isr_cnt; 788 atomic_t isr_cnt;
757}; 789};
@@ -886,6 +918,13 @@ static int s2io_poll(struct net_device *dev, int *budget);
886static void s2io_init_pci(nic_t * sp); 918static void s2io_init_pci(nic_t * sp);
887int s2io_set_mac_addr(struct net_device *dev, u8 * addr); 919int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
888static void s2io_alarm_handle(unsigned long data); 920static void s2io_alarm_handle(unsigned long data);
921static int s2io_enable_msi(nic_t *nic);
922static irqreturn_t s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs);
923static irqreturn_t
924s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs);
925static irqreturn_t
926s2io_msix_fifo_handle(int irq, void *dev_id, struct pt_regs *regs);
927int s2io_enable_msi_x(nic_t *nic);
889static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs); 928static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
890static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag); 929static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
891static struct ethtool_ops netdev_ethtool_ops; 930static struct ethtool_ops netdev_ethtool_ops;
@@ -894,4 +933,5 @@ int s2io_set_swapper(nic_t * sp);
894static void s2io_card_down(nic_t *nic); 933static void s2io_card_down(nic_t *nic);
895static int s2io_card_up(nic_t *nic); 934static int s2io_card_up(nic_t *nic);
896int get_xena_rev_id(struct pci_dev *pdev); 935int get_xena_rev_id(struct pci_dev *pdev);
936void restore_xmsi_data(nic_t *nic);
897#endif /* _S2IO_H */ 937#endif /* _S2IO_H */
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index fd0167077fbe..110e777f206e 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -997,10 +997,7 @@ static void __devexit saa9730_remove_one(struct pci_dev *pdev)
997 997
998 if (dev) { 998 if (dev) {
999 unregister_netdev(dev); 999 unregister_netdev(dev);
1000 1000 kfree(dev->priv);
1001 if (dev->priv)
1002 kfree(dev->priv);
1003
1004 free_netdev(dev); 1001 free_netdev(dev);
1005 pci_release_regions(pdev); 1002 pci_release_regions(pdev);
1006 pci_disable_device(pdev); 1003 pci_disable_device(pdev);
@@ -1096,8 +1093,7 @@ static int lan_saa9730_init(struct net_device *dev, int ioaddr, int irq)
1096 return 0; 1093 return 0;
1097 1094
1098 out: 1095 out:
1099 if (dev->priv) 1096 kfree(dev->priv);
1100 kfree(dev->priv);
1101 return ret; 1097 return ret;
1102} 1098}
1103 1099
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 7abd55a4fb21..aa4ca1821759 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -10,7 +10,7 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License 14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software 15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
@@ -118,8 +118,6 @@ MODULE_PARM_DESC(int_timeout, "Timeout value");
118 ********************************************************************* */ 118 ********************************************************************* */
119 119
120 120
121typedef unsigned long sbmac_port_t;
122
123typedef enum { sbmac_speed_auto, sbmac_speed_10, 121typedef enum { sbmac_speed_auto, sbmac_speed_10,
124 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t; 122 sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
125 123
@@ -129,7 +127,7 @@ typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
129typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame, 127typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
130 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t; 128 sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
131 129
132typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on, 130typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
133 sbmac_state_broken } sbmac_state_t; 131 sbmac_state_broken } sbmac_state_t;
134 132
135 133
@@ -144,17 +142,13 @@ typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
144 142
145#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) 143#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
146 144
147#define SBMAC_READCSR(t) __raw_readq((unsigned long)t)
148#define SBMAC_WRITECSR(t,v) __raw_writeq(v, (unsigned long)t)
149
150
151#define SBMAC_MAX_TXDESCR 32 145#define SBMAC_MAX_TXDESCR 32
152#define SBMAC_MAX_RXDESCR 32 146#define SBMAC_MAX_RXDESCR 32
153 147
154#define ETHER_ALIGN 2 148#define ETHER_ALIGN 2
155#define ETHER_ADDR_LEN 6 149#define ETHER_ADDR_LEN 6
156#define ENET_PACKET_SIZE 1518 150#define ENET_PACKET_SIZE 1518
157/*#define ENET_PACKET_SIZE 9216 */ 151/*#define ENET_PACKET_SIZE 9216 */
158 152
159/********************************************************************** 153/**********************************************************************
160 * DMA Descriptor structure 154 * DMA Descriptor structure
@@ -172,12 +166,12 @@ typedef unsigned long paddr_t;
172 ********************************************************************* */ 166 ********************************************************************* */
173 167
174typedef struct sbmacdma_s { 168typedef struct sbmacdma_s {
175 169
176 /* 170 /*
177 * This stuff is used to identify the channel and the registers 171 * This stuff is used to identify the channel and the registers
178 * associated with it. 172 * associated with it.
179 */ 173 */
180 174
181 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */ 175 struct sbmac_softc *sbdma_eth; /* back pointer to associated MAC */
182 int sbdma_channel; /* channel number */ 176 int sbdma_channel; /* channel number */
183 int sbdma_txdir; /* direction (1=transmit) */ 177 int sbdma_txdir; /* direction (1=transmit) */
@@ -187,21 +181,21 @@ typedef struct sbmacdma_s {
187 int sbdma_int_timeout; /* # usec rx/tx interrupt */ 181 int sbdma_int_timeout; /* # usec rx/tx interrupt */
188#endif 182#endif
189 183
190 sbmac_port_t sbdma_config0; /* DMA config register 0 */ 184 volatile void __iomem *sbdma_config0; /* DMA config register 0 */
191 sbmac_port_t sbdma_config1; /* DMA config register 1 */ 185 volatile void __iomem *sbdma_config1; /* DMA config register 1 */
192 sbmac_port_t sbdma_dscrbase; /* Descriptor base address */ 186 volatile void __iomem *sbdma_dscrbase; /* Descriptor base address */
193 sbmac_port_t sbdma_dscrcnt; /* Descriptor count register */ 187 volatile void __iomem *sbdma_dscrcnt; /* Descriptor count register */
194 sbmac_port_t sbdma_curdscr; /* current descriptor address */ 188 volatile void __iomem *sbdma_curdscr; /* current descriptor address */
195 189
196 /* 190 /*
197 * This stuff is for maintenance of the ring 191 * This stuff is for maintenance of the ring
198 */ 192 */
199 193
200 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */ 194 sbdmadscr_t *sbdma_dscrtable; /* base of descriptor table */
201 sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */ 195 sbdmadscr_t *sbdma_dscrtable_end; /* end of descriptor table */
202 196
203 struct sk_buff **sbdma_ctxtable; /* context table, one per descr */ 197 struct sk_buff **sbdma_ctxtable; /* context table, one per descr */
204 198
205 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */ 199 paddr_t sbdma_dscrtable_phys; /* and also the phys addr */
206 sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */ 200 sbdmadscr_t *sbdma_addptr; /* next dscr for sw to add */
207 sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */ 201 sbdmadscr_t *sbdma_remptr; /* next dscr for sw to remove */
@@ -213,15 +207,15 @@ typedef struct sbmacdma_s {
213 ********************************************************************* */ 207 ********************************************************************* */
214 208
215struct sbmac_softc { 209struct sbmac_softc {
216 210
217 /* 211 /*
218 * Linux-specific things 212 * Linux-specific things
219 */ 213 */
220 214
221 struct net_device *sbm_dev; /* pointer to linux device */ 215 struct net_device *sbm_dev; /* pointer to linux device */
222 spinlock_t sbm_lock; /* spin lock */ 216 spinlock_t sbm_lock; /* spin lock */
223 struct timer_list sbm_timer; /* for monitoring MII */ 217 struct timer_list sbm_timer; /* for monitoring MII */
224 struct net_device_stats sbm_stats; 218 struct net_device_stats sbm_stats;
225 int sbm_devflags; /* current device flags */ 219 int sbm_devflags; /* current device flags */
226 220
227 int sbm_phy_oldbmsr; 221 int sbm_phy_oldbmsr;
@@ -229,31 +223,31 @@ struct sbmac_softc {
229 int sbm_phy_oldk1stsr; 223 int sbm_phy_oldk1stsr;
230 int sbm_phy_oldlinkstat; 224 int sbm_phy_oldlinkstat;
231 int sbm_buffersize; 225 int sbm_buffersize;
232 226
233 unsigned char sbm_phys[2]; 227 unsigned char sbm_phys[2];
234 228
235 /* 229 /*
236 * Controller-specific things 230 * Controller-specific things
237 */ 231 */
238 232
239 unsigned long sbm_base; /* MAC's base address */ 233 volatile void __iomem *sbm_base; /* MAC's base address */
240 sbmac_state_t sbm_state; /* current state */ 234 sbmac_state_t sbm_state; /* current state */
241 235
242 sbmac_port_t sbm_macenable; /* MAC Enable Register */ 236 volatile void __iomem *sbm_macenable; /* MAC Enable Register */
243 sbmac_port_t sbm_maccfg; /* MAC Configuration Register */ 237 volatile void __iomem *sbm_maccfg; /* MAC Configuration Register */
244 sbmac_port_t sbm_fifocfg; /* FIFO configuration register */ 238 volatile void __iomem *sbm_fifocfg; /* FIFO configuration register */
245 sbmac_port_t sbm_framecfg; /* Frame configuration register */ 239 volatile void __iomem *sbm_framecfg; /* Frame configuration register */
246 sbmac_port_t sbm_rxfilter; /* receive filter register */ 240 volatile void __iomem *sbm_rxfilter; /* receive filter register */
247 sbmac_port_t sbm_isr; /* Interrupt status register */ 241 volatile void __iomem *sbm_isr; /* Interrupt status register */
248 sbmac_port_t sbm_imr; /* Interrupt mask register */ 242 volatile void __iomem *sbm_imr; /* Interrupt mask register */
249 sbmac_port_t sbm_mdio; /* MDIO register */ 243 volatile void __iomem *sbm_mdio; /* MDIO register */
250 244
251 sbmac_speed_t sbm_speed; /* current speed */ 245 sbmac_speed_t sbm_speed; /* current speed */
252 sbmac_duplex_t sbm_duplex; /* current duplex */ 246 sbmac_duplex_t sbm_duplex; /* current duplex */
253 sbmac_fc_t sbm_fc; /* current flow control setting */ 247 sbmac_fc_t sbm_fc; /* current flow control setting */
254 248
255 unsigned char sbm_hwaddr[ETHER_ADDR_LEN]; 249 unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
256 250
257 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */ 251 sbmacdma_t sbm_txdma; /* for now, only use channel 0 */
258 sbmacdma_t sbm_rxdma; 252 sbmacdma_t sbm_rxdma;
259 int rx_hw_checksum; 253 int rx_hw_checksum;
@@ -302,6 +296,7 @@ static void sbmac_set_rx_mode(struct net_device *dev);
302static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 296static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
303static int sbmac_close(struct net_device *dev); 297static int sbmac_close(struct net_device *dev);
304static int sbmac_mii_poll(struct sbmac_softc *s,int noisy); 298static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
299static int sbmac_mii_probe(struct net_device *dev);
305 300
306static void sbmac_mii_sync(struct sbmac_softc *s); 301static void sbmac_mii_sync(struct sbmac_softc *s);
307static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt); 302static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt);
@@ -439,6 +434,9 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
439 434
440#define MII_BMCR 0x00 /* Basic mode control register (rw) */ 435#define MII_BMCR 0x00 /* Basic mode control register (rw) */
441#define MII_BMSR 0x01 /* Basic mode status register (ro) */ 436#define MII_BMSR 0x01 /* Basic mode status register (ro) */
437#define MII_PHYIDR1 0x02
438#define MII_PHYIDR2 0x03
439
442#define MII_K1STSR 0x0A /* 1K Status Register (ro) */ 440#define MII_K1STSR 0x0A /* 1K Status Register (ro) */
443#define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */ 441#define MII_ANLPAR 0x05 /* Autonegotiation lnk partner abilities (rw) */
444 442
@@ -450,13 +448,13 @@ static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
450 448
451/********************************************************************** 449/**********************************************************************
452 * SBMAC_MII_SYNC(s) 450 * SBMAC_MII_SYNC(s)
453 * 451 *
454 * Synchronize with the MII - send a pattern of bits to the MII 452 * Synchronize with the MII - send a pattern of bits to the MII
455 * that will guarantee that it is ready to accept a command. 453 * that will guarantee that it is ready to accept a command.
456 * 454 *
457 * Input parameters: 455 * Input parameters:
458 * s - sbmac structure 456 * s - sbmac structure
459 * 457 *
460 * Return value: 458 * Return value:
461 * nothing 459 * nothing
462 ********************************************************************* */ 460 ********************************************************************* */
@@ -467,25 +465,25 @@ static void sbmac_mii_sync(struct sbmac_softc *s)
467 uint64_t bits; 465 uint64_t bits;
468 int mac_mdio_genc; 466 int mac_mdio_genc;
469 467
470 mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC; 468 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
471 469
472 bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT; 470 bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
473 471
474 SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); 472 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
475 473
476 for (cnt = 0; cnt < 32; cnt++) { 474 for (cnt = 0; cnt < 32; cnt++) {
477 SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc); 475 __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
478 SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); 476 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
479 } 477 }
480} 478}
481 479
482/********************************************************************** 480/**********************************************************************
483 * SBMAC_MII_SENDDATA(s,data,bitcnt) 481 * SBMAC_MII_SENDDATA(s,data,bitcnt)
484 * 482 *
485 * Send some bits to the MII. The bits to be sent are right- 483 * Send some bits to the MII. The bits to be sent are right-
486 * justified in the 'data' parameter. 484 * justified in the 'data' parameter.
487 * 485 *
488 * Input parameters: 486 * Input parameters:
489 * s - sbmac structure 487 * s - sbmac structure
490 * data - data to send 488 * data - data to send
491 * bitcnt - number of bits to send 489 * bitcnt - number of bits to send
@@ -498,20 +496,20 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc
498 unsigned int curmask; 496 unsigned int curmask;
499 int mac_mdio_genc; 497 int mac_mdio_genc;
500 498
501 mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC; 499 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
502 500
503 bits = M_MAC_MDIO_DIR_OUTPUT; 501 bits = M_MAC_MDIO_DIR_OUTPUT;
504 SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); 502 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
505 503
506 curmask = 1 << (bitcnt - 1); 504 curmask = 1 << (bitcnt - 1);
507 505
508 for (i = 0; i < bitcnt; i++) { 506 for (i = 0; i < bitcnt; i++) {
509 if (data & curmask) 507 if (data & curmask)
510 bits |= M_MAC_MDIO_OUT; 508 bits |= M_MAC_MDIO_OUT;
511 else bits &= ~M_MAC_MDIO_OUT; 509 else bits &= ~M_MAC_MDIO_OUT;
512 SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); 510 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
513 SBMAC_WRITECSR(s->sbm_mdio,bits | M_MAC_MDC | mac_mdio_genc); 511 __raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
514 SBMAC_WRITECSR(s->sbm_mdio,bits | mac_mdio_genc); 512 __raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
515 curmask >>= 1; 513 curmask >>= 1;
516 } 514 }
517} 515}
@@ -520,14 +518,14 @@ static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitc
520 518
521/********************************************************************** 519/**********************************************************************
522 * SBMAC_MII_READ(s,phyaddr,regidx) 520 * SBMAC_MII_READ(s,phyaddr,regidx)
523 * 521 *
524 * Read a PHY register. 522 * Read a PHY register.
525 * 523 *
526 * Input parameters: 524 * Input parameters:
527 * s - sbmac structure 525 * s - sbmac structure
528 * phyaddr - PHY's address 526 * phyaddr - PHY's address
529 * regidx = index of register to read 527 * regidx = index of register to read
530 * 528 *
531 * Return value: 529 * Return value:
532 * value read, or 0 if an error occurred. 530 * value read, or 0 if an error occurred.
533 ********************************************************************* */ 531 ********************************************************************* */
@@ -543,9 +541,9 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
543 * Synchronize ourselves so that the PHY knows the next 541 * Synchronize ourselves so that the PHY knows the next
544 * thing coming down is a command 542 * thing coming down is a command
545 */ 543 */
546 544
547 sbmac_mii_sync(s); 545 sbmac_mii_sync(s);
548 546
549 /* 547 /*
550 * Send the data to the PHY. The sequence is 548 * Send the data to the PHY. The sequence is
551 * a "start" command (2 bits) 549 * a "start" command (2 bits)
@@ -553,59 +551,55 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
553 * the PHY addr (5 bits) 551 * the PHY addr (5 bits)
554 * the register index (5 bits) 552 * the register index (5 bits)
555 */ 553 */
556 554
557 sbmac_mii_senddata(s,MII_COMMAND_START, 2); 555 sbmac_mii_senddata(s,MII_COMMAND_START, 2);
558 sbmac_mii_senddata(s,MII_COMMAND_READ, 2); 556 sbmac_mii_senddata(s,MII_COMMAND_READ, 2);
559 sbmac_mii_senddata(s,phyaddr, 5); 557 sbmac_mii_senddata(s,phyaddr, 5);
560 sbmac_mii_senddata(s,regidx, 5); 558 sbmac_mii_senddata(s,regidx, 5);
561 559
562 mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC; 560 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
563 561
564 /* 562 /*
565 * Switch the port around without a clock transition. 563 * Switch the port around without a clock transition.
566 */ 564 */
567 SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc); 565 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
568 566
569 /* 567 /*
570 * Send out a clock pulse to signal we want the status 568 * Send out a clock pulse to signal we want the status
571 */ 569 */
572 570
573 SBMAC_WRITECSR(s->sbm_mdio, 571 __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
574 M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc); 572 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
575 SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc); 573
576 574 /*
577 /*
578 * If an error occurred, the PHY will signal '1' back 575 * If an error occurred, the PHY will signal '1' back
579 */ 576 */
580 error = SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN; 577 error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN;
581 578
582 /* 579 /*
583 * Issue an 'idle' clock pulse, but keep the direction 580 * Issue an 'idle' clock pulse, but keep the direction
584 * the same. 581 * the same.
585 */ 582 */
586 SBMAC_WRITECSR(s->sbm_mdio, 583 __raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
587 M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc); 584 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
588 SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_INPUT | mac_mdio_genc); 585
589
590 regval = 0; 586 regval = 0;
591 587
592 for (idx = 0; idx < 16; idx++) { 588 for (idx = 0; idx < 16; idx++) {
593 regval <<= 1; 589 regval <<= 1;
594 590
595 if (error == 0) { 591 if (error == 0) {
596 if (SBMAC_READCSR(s->sbm_mdio) & M_MAC_MDIO_IN) 592 if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN)
597 regval |= 1; 593 regval |= 1;
598 } 594 }
599 595
600 SBMAC_WRITECSR(s->sbm_mdio, 596 __raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
601 M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc); 597 __raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
602 SBMAC_WRITECSR(s->sbm_mdio,
603 M_MAC_MDIO_DIR_INPUT | mac_mdio_genc);
604 } 598 }
605 599
606 /* Switch back to output */ 600 /* Switch back to output */
607 SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc); 601 __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
608 602
609 if (error == 0) 603 if (error == 0)
610 return regval; 604 return regval;
611 return 0; 605 return 0;
@@ -614,15 +608,15 @@ static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
614 608
615/********************************************************************** 609/**********************************************************************
616 * SBMAC_MII_WRITE(s,phyaddr,regidx,regval) 610 * SBMAC_MII_WRITE(s,phyaddr,regidx,regval)
617 * 611 *
618 * Write a value to a PHY register. 612 * Write a value to a PHY register.
619 * 613 *
620 * Input parameters: 614 * Input parameters:
621 * s - sbmac structure 615 * s - sbmac structure
622 * phyaddr - PHY to use 616 * phyaddr - PHY to use
623 * regidx - register within the PHY 617 * regidx - register within the PHY
624 * regval - data to write to register 618 * regval - data to write to register
625 * 619 *
626 * Return value: 620 * Return value:
627 * nothing 621 * nothing
628 ********************************************************************* */ 622 ********************************************************************* */
@@ -633,7 +627,7 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
633 int mac_mdio_genc; 627 int mac_mdio_genc;
634 628
635 sbmac_mii_sync(s); 629 sbmac_mii_sync(s);
636 630
637 sbmac_mii_senddata(s,MII_COMMAND_START,2); 631 sbmac_mii_senddata(s,MII_COMMAND_START,2);
638 sbmac_mii_senddata(s,MII_COMMAND_WRITE,2); 632 sbmac_mii_senddata(s,MII_COMMAND_WRITE,2);
639 sbmac_mii_senddata(s,phyaddr, 5); 633 sbmac_mii_senddata(s,phyaddr, 5);
@@ -641,27 +635,27 @@ static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
641 sbmac_mii_senddata(s,MII_COMMAND_ACK,2); 635 sbmac_mii_senddata(s,MII_COMMAND_ACK,2);
642 sbmac_mii_senddata(s,regval,16); 636 sbmac_mii_senddata(s,regval,16);
643 637
644 mac_mdio_genc = SBMAC_READCSR(s->sbm_mdio) & M_MAC_GENC; 638 mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
645 639
646 SBMAC_WRITECSR(s->sbm_mdio,M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc); 640 __raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
647} 641}
648 642
649 643
650 644
651/********************************************************************** 645/**********************************************************************
652 * SBDMA_INITCTX(d,s,chan,txrx,maxdescr) 646 * SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
653 * 647 *
654 * Initialize a DMA channel context. Since there are potentially 648 * Initialize a DMA channel context. Since there are potentially
655 * eight DMA channels per MAC, it's nice to do this in a standard 649 * eight DMA channels per MAC, it's nice to do this in a standard
656 * way. 650 * way.
657 * 651 *
658 * Input parameters: 652 * Input parameters:
659 * d - sbmacdma_t structure (DMA channel context) 653 * d - sbmacdma_t structure (DMA channel context)
660 * s - sbmac_softc structure (pointer to a MAC) 654 * s - sbmac_softc structure (pointer to a MAC)
661 * chan - channel number (0..1 right now) 655 * chan - channel number (0..1 right now)
662 * txrx - Identifies DMA_TX or DMA_RX for channel direction 656 * txrx - Identifies DMA_TX or DMA_RX for channel direction
663 * maxdescr - number of descriptors 657 * maxdescr - number of descriptors
664 * 658 *
665 * Return value: 659 * Return value:
666 * nothing 660 * nothing
667 ********************************************************************* */ 661 ********************************************************************* */
@@ -672,101 +666,87 @@ static void sbdma_initctx(sbmacdma_t *d,
672 int txrx, 666 int txrx,
673 int maxdescr) 667 int maxdescr)
674{ 668{
675 /* 669 /*
676 * Save away interesting stuff in the structure 670 * Save away interesting stuff in the structure
677 */ 671 */
678 672
679 d->sbdma_eth = s; 673 d->sbdma_eth = s;
680 d->sbdma_channel = chan; 674 d->sbdma_channel = chan;
681 d->sbdma_txdir = txrx; 675 d->sbdma_txdir = txrx;
682 676
683#if 0 677#if 0
684 /* RMON clearing */ 678 /* RMON clearing */
685 s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING; 679 s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
686#endif 680#endif
687 681
688 SBMAC_WRITECSR(IOADDR( 682 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)));
689 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)), 0); 683 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)));
690 SBMAC_WRITECSR(IOADDR( 684 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)));
691 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)), 0); 685 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)));
692 SBMAC_WRITECSR(IOADDR( 686 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)));
693 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)), 0); 687 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)));
694 SBMAC_WRITECSR(IOADDR( 688 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)));
695 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)), 0); 689 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)));
696 SBMAC_WRITECSR(IOADDR( 690 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)));
697 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)), 0); 691 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)));
698 SBMAC_WRITECSR(IOADDR( 692 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)));
699 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)), 0); 693 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)));
700 SBMAC_WRITECSR(IOADDR( 694 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)));
701 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)), 0); 695 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)));
702 SBMAC_WRITECSR(IOADDR( 696 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)));
703 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)), 0); 697 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)));
704 SBMAC_WRITECSR(IOADDR( 698 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)));
705 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)), 0); 699 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)));
706 SBMAC_WRITECSR(IOADDR( 700 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)));
707 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)), 0); 701 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)));
708 SBMAC_WRITECSR(IOADDR( 702 __raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)));
709 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)), 0); 703
710 SBMAC_WRITECSR(IOADDR( 704 /*
711 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)), 0); 705 * initialize register pointers
712 SBMAC_WRITECSR(IOADDR( 706 */
713 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)), 0); 707
714 SBMAC_WRITECSR(IOADDR( 708 d->sbdma_config0 =
715 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)), 0);
716 SBMAC_WRITECSR(IOADDR(
717 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)), 0);
718 SBMAC_WRITECSR(IOADDR(
719 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)), 0);
720 SBMAC_WRITECSR(IOADDR(
721 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)), 0);
722 SBMAC_WRITECSR(IOADDR(
723 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)), 0);
724 SBMAC_WRITECSR(IOADDR(
725 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)), 0);
726 SBMAC_WRITECSR(IOADDR(
727 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)), 0);
728 SBMAC_WRITECSR(IOADDR(
729 A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)), 0);
730
731 /*
732 * initialize register pointers
733 */
734
735 d->sbdma_config0 =
736 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0); 709 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
737 d->sbdma_config1 = 710 d->sbdma_config1 =
738 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1); 711 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
739 d->sbdma_dscrbase = 712 d->sbdma_dscrbase =
740 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE); 713 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
741 d->sbdma_dscrcnt = 714 d->sbdma_dscrcnt =
742 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT); 715 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
743 d->sbdma_curdscr = 716 d->sbdma_curdscr =
744 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR); 717 s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
745 718
746 /* 719 /*
747 * Allocate memory for the ring 720 * Allocate memory for the ring
748 */ 721 */
749 722
750 d->sbdma_maxdescr = maxdescr; 723 d->sbdma_maxdescr = maxdescr;
751 724
752 d->sbdma_dscrtable = (sbdmadscr_t *) 725 d->sbdma_dscrtable = (sbdmadscr_t *)
753 kmalloc(d->sbdma_maxdescr*sizeof(sbdmadscr_t), GFP_KERNEL); 726 kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);
754 727
728 /*
729 * The descriptor table must be aligned to at least 16 bytes or the
730 * MAC will corrupt it.
731 */
732 d->sbdma_dscrtable = (sbdmadscr_t *)
733 ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t));
734
755 memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t)); 735 memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t));
756 736
757 d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr; 737 d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
758 738
759 d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable); 739 d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
760 740
761 /* 741 /*
762 * And context table 742 * And context table
763 */ 743 */
764 744
765 d->sbdma_ctxtable = (struct sk_buff **) 745 d->sbdma_ctxtable = (struct sk_buff **)
766 kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL); 746 kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL);
767 747
768 memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *)); 748 memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *));
769 749
770#ifdef CONFIG_SBMAC_COALESCE 750#ifdef CONFIG_SBMAC_COALESCE
771 /* 751 /*
772 * Setup Rx/Tx DMA coalescing defaults 752 * Setup Rx/Tx DMA coalescing defaults
@@ -777,7 +757,7 @@ static void sbdma_initctx(sbmacdma_t *d,
777 } else { 757 } else {
778 d->sbdma_int_pktcnt = 1; 758 d->sbdma_int_pktcnt = 1;
779 } 759 }
780 760
781 if ( int_timeout ) { 761 if ( int_timeout ) {
782 d->sbdma_int_timeout = int_timeout; 762 d->sbdma_int_timeout = int_timeout;
783 } else { 763 } else {
@@ -789,13 +769,13 @@ static void sbdma_initctx(sbmacdma_t *d,
789 769
790/********************************************************************** 770/**********************************************************************
791 * SBDMA_CHANNEL_START(d) 771 * SBDMA_CHANNEL_START(d)
792 * 772 *
793 * Initialize the hardware registers for a DMA channel. 773 * Initialize the hardware registers for a DMA channel.
794 * 774 *
795 * Input parameters: 775 * Input parameters:
796 * d - DMA channel to init (context must be previously init'd 776 * d - DMA channel to init (context must be previously init'd
797 * rxtx - DMA_RX or DMA_TX depending on what type of channel 777 * rxtx - DMA_RX or DMA_TX depending on what type of channel
798 * 778 *
799 * Return value: 779 * Return value:
800 * nothing 780 * nothing
801 ********************************************************************* */ 781 ********************************************************************* */
@@ -805,24 +785,21 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
805 /* 785 /*
806 * Turn on the DMA channel 786 * Turn on the DMA channel
807 */ 787 */
808 788
809#ifdef CONFIG_SBMAC_COALESCE 789#ifdef CONFIG_SBMAC_COALESCE
810 SBMAC_WRITECSR(d->sbdma_config1, 790 __raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
811 V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) | 791 0, d->sbdma_config1);
812 0); 792 __raw_writeq(M_DMA_EOP_INT_EN |
813 SBMAC_WRITECSR(d->sbdma_config0,
814 M_DMA_EOP_INT_EN |
815 V_DMA_RINGSZ(d->sbdma_maxdescr) | 793 V_DMA_RINGSZ(d->sbdma_maxdescr) |
816 V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) | 794 V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
817 0); 795 0, d->sbdma_config0);
818#else 796#else
819 SBMAC_WRITECSR(d->sbdma_config1,0); 797 __raw_writeq(0, d->sbdma_config1);
820 SBMAC_WRITECSR(d->sbdma_config0, 798 __raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
821 V_DMA_RINGSZ(d->sbdma_maxdescr) | 799 0, d->sbdma_config0);
822 0);
823#endif 800#endif
824 801
825 SBMAC_WRITECSR(d->sbdma_dscrbase,d->sbdma_dscrtable_phys); 802 __raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
826 803
827 /* 804 /*
828 * Initialize ring pointers 805 * Initialize ring pointers
@@ -834,12 +811,12 @@ static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
834 811
835/********************************************************************** 812/**********************************************************************
836 * SBDMA_CHANNEL_STOP(d) 813 * SBDMA_CHANNEL_STOP(d)
837 * 814 *
838 * Initialize the hardware registers for a DMA channel. 815 * Initialize the hardware registers for a DMA channel.
839 * 816 *
840 * Input parameters: 817 * Input parameters:
841 * d - DMA channel to init (context must be previously init'd 818 * d - DMA channel to init (context must be previously init'd
842 * 819 *
843 * Return value: 820 * Return value:
844 * nothing 821 * nothing
845 ********************************************************************* */ 822 ********************************************************************* */
@@ -849,44 +826,44 @@ static void sbdma_channel_stop(sbmacdma_t *d)
849 /* 826 /*
850 * Turn off the DMA channel 827 * Turn off the DMA channel
851 */ 828 */
852 829
853 SBMAC_WRITECSR(d->sbdma_config1,0); 830 __raw_writeq(0, d->sbdma_config1);
854 831
855 SBMAC_WRITECSR(d->sbdma_dscrbase,0); 832 __raw_writeq(0, d->sbdma_dscrbase);
856 833
857 SBMAC_WRITECSR(d->sbdma_config0,0); 834 __raw_writeq(0, d->sbdma_config0);
858 835
859 /* 836 /*
860 * Zero ring pointers 837 * Zero ring pointers
861 */ 838 */
862 839
863 d->sbdma_addptr = 0; 840 d->sbdma_addptr = NULL;
864 d->sbdma_remptr = 0; 841 d->sbdma_remptr = NULL;
865} 842}
866 843
867static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) 844static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
868{ 845{
869 unsigned long addr; 846 unsigned long addr;
870 unsigned long newaddr; 847 unsigned long newaddr;
871 848
872 addr = (unsigned long) skb->data; 849 addr = (unsigned long) skb->data;
873 850
874 newaddr = (addr + power2 - 1) & ~(power2 - 1); 851 newaddr = (addr + power2 - 1) & ~(power2 - 1);
875 852
876 skb_reserve(skb,newaddr-addr+offset); 853 skb_reserve(skb,newaddr-addr+offset);
877} 854}
878 855
879 856
880/********************************************************************** 857/**********************************************************************
881 * SBDMA_ADD_RCVBUFFER(d,sb) 858 * SBDMA_ADD_RCVBUFFER(d,sb)
882 * 859 *
883 * Add a buffer to the specified DMA channel. For receive channels, 860 * Add a buffer to the specified DMA channel. For receive channels,
884 * this queues a buffer for inbound packets. 861 * this queues a buffer for inbound packets.
885 * 862 *
886 * Input parameters: 863 * Input parameters:
887 * d - DMA channel descriptor 864 * d - DMA channel descriptor
888 * sb - sk_buff to add, or NULL if we should allocate one 865 * sb - sk_buff to add, or NULL if we should allocate one
889 * 866 *
890 * Return value: 867 * Return value:
891 * 0 if buffer could not be added (ring is full) 868 * 0 if buffer could not be added (ring is full)
892 * 1 if buffer added successfully 869 * 1 if buffer added successfully
@@ -899,24 +876,24 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
899 sbdmadscr_t *nextdsc; 876 sbdmadscr_t *nextdsc;
900 struct sk_buff *sb_new = NULL; 877 struct sk_buff *sb_new = NULL;
901 int pktsize = ENET_PACKET_SIZE; 878 int pktsize = ENET_PACKET_SIZE;
902 879
903 /* get pointer to our current place in the ring */ 880 /* get pointer to our current place in the ring */
904 881
905 dsc = d->sbdma_addptr; 882 dsc = d->sbdma_addptr;
906 nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); 883 nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
907 884
908 /* 885 /*
909 * figure out if the ring is full - if the next descriptor 886 * figure out if the ring is full - if the next descriptor
910 * is the same as the one that we're going to remove from 887 * is the same as the one that we're going to remove from
911 * the ring, the ring is full 888 * the ring, the ring is full
912 */ 889 */
913 890
914 if (nextdsc == d->sbdma_remptr) { 891 if (nextdsc == d->sbdma_remptr) {
915 return -ENOSPC; 892 return -ENOSPC;
916 } 893 }
917 894
918 /* 895 /*
919 * Allocate a sk_buff if we don't already have one. 896 * Allocate a sk_buff if we don't already have one.
920 * If we do have an sk_buff, reset it so that it's empty. 897 * If we do have an sk_buff, reset it so that it's empty.
921 * 898 *
922 * Note: sk_buffs don't seem to be guaranteed to have any sort 899 * Note: sk_buffs don't seem to be guaranteed to have any sort
@@ -925,7 +902,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
925 * 902 *
926 * 1. the data does not start in the middle of a cache line. 903 * 1. the data does not start in the middle of a cache line.
927 * 2. The data does not end in the middle of a cache line 904 * 2. The data does not end in the middle of a cache line
928 * 3. The buffer can be aligned such that the IP addresses are 905 * 3. The buffer can be aligned such that the IP addresses are
929 * naturally aligned. 906 * naturally aligned.
930 * 907 *
931 * Remember, the SOCs MAC writes whole cache lines at a time, 908 * Remember, the SOCs MAC writes whole cache lines at a time,
@@ -933,7 +910,7 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
933 * data portion starts in the middle of a cache line, the SOC 910 * data portion starts in the middle of a cache line, the SOC
934 * DMA will trash the beginning (and ending) portions. 911 * DMA will trash the beginning (and ending) portions.
935 */ 912 */
936 913
937 if (sb == NULL) { 914 if (sb == NULL) {
938 sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); 915 sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
939 if (sb_new == NULL) { 916 if (sb_new == NULL) {
@@ -949,23 +926,22 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
949 } 926 }
950 else { 927 else {
951 sb_new = sb; 928 sb_new = sb;
952 /* 929 /*
953 * nothing special to reinit buffer, it's already aligned 930 * nothing special to reinit buffer, it's already aligned
954 * and sb->data already points to a good place. 931 * and sb->data already points to a good place.
955 */ 932 */
956 } 933 }
957 934
958 /* 935 /*
959 * fill in the descriptor 936 * fill in the descriptor
960 */ 937 */
961 938
962#ifdef CONFIG_SBMAC_COALESCE 939#ifdef CONFIG_SBMAC_COALESCE
963 /* 940 /*
964 * Do not interrupt per DMA transfer. 941 * Do not interrupt per DMA transfer.
965 */ 942 */
966 dsc->dscr_a = virt_to_phys(sb_new->data) | 943 dsc->dscr_a = virt_to_phys(sb_new->data) |
967 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 944 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
968 0;
969#else 945#else
970 dsc->dscr_a = virt_to_phys(sb_new->data) | 946 dsc->dscr_a = virt_to_phys(sb_new->data) |
971 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 947 V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
@@ -974,38 +950,38 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
974 950
975 /* receiving: no options */ 951 /* receiving: no options */
976 dsc->dscr_b = 0; 952 dsc->dscr_b = 0;
977 953
978 /* 954 /*
979 * fill in the context 955 * fill in the context
980 */ 956 */
981 957
982 d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new; 958 d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
983 959
984 /* 960 /*
985 * point at next packet 961 * point at next packet
986 */ 962 */
987 963
988 d->sbdma_addptr = nextdsc; 964 d->sbdma_addptr = nextdsc;
989 965
990 /* 966 /*
991 * Give the buffer to the DMA engine. 967 * Give the buffer to the DMA engine.
992 */ 968 */
993 969
994 SBMAC_WRITECSR(d->sbdma_dscrcnt,1); 970 __raw_writeq(1, d->sbdma_dscrcnt);
995 971
996 return 0; /* we did it */ 972 return 0; /* we did it */
997} 973}
998 974
999/********************************************************************** 975/**********************************************************************
1000 * SBDMA_ADD_TXBUFFER(d,sb) 976 * SBDMA_ADD_TXBUFFER(d,sb)
1001 * 977 *
1002 * Add a transmit buffer to the specified DMA channel, causing a 978 * Add a transmit buffer to the specified DMA channel, causing a
1003 * transmit to start. 979 * transmit to start.
1004 * 980 *
1005 * Input parameters: 981 * Input parameters:
1006 * d - DMA channel descriptor 982 * d - DMA channel descriptor
1007 * sb - sk_buff to add 983 * sb - sk_buff to add
1008 * 984 *
1009 * Return value: 985 * Return value:
1010 * 0 transmit queued successfully 986 * 0 transmit queued successfully
1011 * otherwise error code 987 * otherwise error code
@@ -1019,70 +995,70 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
1019 uint64_t phys; 995 uint64_t phys;
1020 uint64_t ncb; 996 uint64_t ncb;
1021 int length; 997 int length;
1022 998
1023 /* get pointer to our current place in the ring */ 999 /* get pointer to our current place in the ring */
1024 1000
1025 dsc = d->sbdma_addptr; 1001 dsc = d->sbdma_addptr;
1026 nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr); 1002 nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
1027 1003
1028 /* 1004 /*
1029 * figure out if the ring is full - if the next descriptor 1005 * figure out if the ring is full - if the next descriptor
1030 * is the same as the one that we're going to remove from 1006 * is the same as the one that we're going to remove from
1031 * the ring, the ring is full 1007 * the ring, the ring is full
1032 */ 1008 */
1033 1009
1034 if (nextdsc == d->sbdma_remptr) { 1010 if (nextdsc == d->sbdma_remptr) {
1035 return -ENOSPC; 1011 return -ENOSPC;
1036 } 1012 }
1037 1013
1038 /* 1014 /*
1039 * Under Linux, it's not necessary to copy/coalesce buffers 1015 * Under Linux, it's not necessary to copy/coalesce buffers
1040 * like it is on NetBSD. We think they're all contiguous, 1016 * like it is on NetBSD. We think they're all contiguous,
1041 * but that may not be true for GBE. 1017 * but that may not be true for GBE.
1042 */ 1018 */
1043 1019
1044 length = sb->len; 1020 length = sb->len;
1045 1021
1046 /* 1022 /*
1047 * fill in the descriptor. Note that the number of cache 1023 * fill in the descriptor. Note that the number of cache
1048 * blocks in the descriptor is the number of blocks 1024 * blocks in the descriptor is the number of blocks
1049 * *spanned*, so we need to add in the offset (if any) 1025 * *spanned*, so we need to add in the offset (if any)
1050 * while doing the calculation. 1026 * while doing the calculation.
1051 */ 1027 */
1052 1028
1053 phys = virt_to_phys(sb->data); 1029 phys = virt_to_phys(sb->data);
1054 ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1))); 1030 ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
1055 1031
1056 dsc->dscr_a = phys | 1032 dsc->dscr_a = phys |
1057 V_DMA_DSCRA_A_SIZE(ncb) | 1033 V_DMA_DSCRA_A_SIZE(ncb) |
1058#ifndef CONFIG_SBMAC_COALESCE 1034#ifndef CONFIG_SBMAC_COALESCE
1059 M_DMA_DSCRA_INTERRUPT | 1035 M_DMA_DSCRA_INTERRUPT |
1060#endif 1036#endif
1061 M_DMA_ETHTX_SOP; 1037 M_DMA_ETHTX_SOP;
1062 1038
1063 /* transmitting: set outbound options and length */ 1039 /* transmitting: set outbound options and length */
1064 1040
1065 dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) | 1041 dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
1066 V_DMA_DSCRB_PKT_SIZE(length); 1042 V_DMA_DSCRB_PKT_SIZE(length);
1067 1043
1068 /* 1044 /*
1069 * fill in the context 1045 * fill in the context
1070 */ 1046 */
1071 1047
1072 d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb; 1048 d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
1073 1049
1074 /* 1050 /*
1075 * point at next packet 1051 * point at next packet
1076 */ 1052 */
1077 1053
1078 d->sbdma_addptr = nextdsc; 1054 d->sbdma_addptr = nextdsc;
1079 1055
1080 /* 1056 /*
1081 * Give the buffer to the DMA engine. 1057 * Give the buffer to the DMA engine.
1082 */ 1058 */
1083 1059
1084 SBMAC_WRITECSR(d->sbdma_dscrcnt,1); 1060 __raw_writeq(1, d->sbdma_dscrcnt);
1085 1061
1086 return 0; /* we did it */ 1062 return 0; /* we did it */
1087} 1063}
1088 1064
@@ -1091,12 +1067,12 @@ static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
1091 1067
1092/********************************************************************** 1068/**********************************************************************
1093 * SBDMA_EMPTYRING(d) 1069 * SBDMA_EMPTYRING(d)
1094 * 1070 *
1095 * Free all allocated sk_buffs on the specified DMA channel; 1071 * Free all allocated sk_buffs on the specified DMA channel;
1096 * 1072 *
1097 * Input parameters: 1073 * Input parameters:
1098 * d - DMA channel 1074 * d - DMA channel
1099 * 1075 *
1100 * Return value: 1076 * Return value:
1101 * nothing 1077 * nothing
1102 ********************************************************************* */ 1078 ********************************************************************* */
@@ -1105,7 +1081,7 @@ static void sbdma_emptyring(sbmacdma_t *d)
1105{ 1081{
1106 int idx; 1082 int idx;
1107 struct sk_buff *sb; 1083 struct sk_buff *sb;
1108 1084
1109 for (idx = 0; idx < d->sbdma_maxdescr; idx++) { 1085 for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
1110 sb = d->sbdma_ctxtable[idx]; 1086 sb = d->sbdma_ctxtable[idx];
1111 if (sb) { 1087 if (sb) {
@@ -1118,13 +1094,13 @@ static void sbdma_emptyring(sbmacdma_t *d)
1118 1094
1119/********************************************************************** 1095/**********************************************************************
1120 * SBDMA_FILLRING(d) 1096 * SBDMA_FILLRING(d)
1121 * 1097 *
1122 * Fill the specified DMA channel (must be receive channel) 1098 * Fill the specified DMA channel (must be receive channel)
1123 * with sk_buffs 1099 * with sk_buffs
1124 * 1100 *
1125 * Input parameters: 1101 * Input parameters:
1126 * d - DMA channel 1102 * d - DMA channel
1127 * 1103 *
1128 * Return value: 1104 * Return value:
1129 * nothing 1105 * nothing
1130 ********************************************************************* */ 1106 ********************************************************************* */
@@ -1132,7 +1108,7 @@ static void sbdma_emptyring(sbmacdma_t *d)
1132static void sbdma_fillring(sbmacdma_t *d) 1108static void sbdma_fillring(sbmacdma_t *d)
1133{ 1109{
1134 int idx; 1110 int idx;
1135 1111
1136 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { 1112 for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
1137 if (sbdma_add_rcvbuffer(d,NULL) != 0) 1113 if (sbdma_add_rcvbuffer(d,NULL) != 0)
1138 break; 1114 break;
@@ -1142,16 +1118,16 @@ static void sbdma_fillring(sbmacdma_t *d)
1142 1118
1143/********************************************************************** 1119/**********************************************************************
1144 * SBDMA_RX_PROCESS(sc,d) 1120 * SBDMA_RX_PROCESS(sc,d)
1145 * 1121 *
1146 * Process "completed" receive buffers on the specified DMA channel. 1122 * Process "completed" receive buffers on the specified DMA channel.
1147 * Note that this isn't really ideal for priority channels, since 1123 * Note that this isn't really ideal for priority channels, since
1148 * it processes all of the packets on a given channel before 1124 * it processes all of the packets on a given channel before
1149 * returning. 1125 * returning.
1150 * 1126 *
1151 * Input parameters: 1127 * Input parameters:
1152 * sc - softc structure 1128 * sc - softc structure
1153 * d - DMA channel context 1129 * d - DMA channel context
1154 * 1130 *
1155 * Return value: 1131 * Return value:
1156 * nothing 1132 * nothing
1157 ********************************************************************* */ 1133 ********************************************************************* */
@@ -1163,56 +1139,56 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1163 sbdmadscr_t *dsc; 1139 sbdmadscr_t *dsc;
1164 struct sk_buff *sb; 1140 struct sk_buff *sb;
1165 int len; 1141 int len;
1166 1142
1167 for (;;) { 1143 for (;;) {
1168 /* 1144 /*
1169 * figure out where we are (as an index) and where 1145 * figure out where we are (as an index) and where
1170 * the hardware is (also as an index) 1146 * the hardware is (also as an index)
1171 * 1147 *
1172 * This could be done faster if (for example) the 1148 * This could be done faster if (for example) the
1173 * descriptor table was page-aligned and contiguous in 1149 * descriptor table was page-aligned and contiguous in
1174 * both virtual and physical memory -- you could then 1150 * both virtual and physical memory -- you could then
1175 * just compare the low-order bits of the virtual address 1151 * just compare the low-order bits of the virtual address
1176 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) 1152 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
1177 */ 1153 */
1178 1154
1179 curidx = d->sbdma_remptr - d->sbdma_dscrtable; 1155 curidx = d->sbdma_remptr - d->sbdma_dscrtable;
1180 hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - 1156 hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1181 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); 1157 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1182 1158
1183 /* 1159 /*
1184 * If they're the same, that means we've processed all 1160 * If they're the same, that means we've processed all
1185 * of the descriptors up to (but not including) the one that 1161 * of the descriptors up to (but not including) the one that
1186 * the hardware is working on right now. 1162 * the hardware is working on right now.
1187 */ 1163 */
1188 1164
1189 if (curidx == hwidx) 1165 if (curidx == hwidx)
1190 break; 1166 break;
1191 1167
1192 /* 1168 /*
1193 * Otherwise, get the packet's sk_buff ptr back 1169 * Otherwise, get the packet's sk_buff ptr back
1194 */ 1170 */
1195 1171
1196 dsc = &(d->sbdma_dscrtable[curidx]); 1172 dsc = &(d->sbdma_dscrtable[curidx]);
1197 sb = d->sbdma_ctxtable[curidx]; 1173 sb = d->sbdma_ctxtable[curidx];
1198 d->sbdma_ctxtable[curidx] = NULL; 1174 d->sbdma_ctxtable[curidx] = NULL;
1199 1175
1200 len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4; 1176 len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
1201 1177
1202 /* 1178 /*
1203 * Check packet status. If good, process it. 1179 * Check packet status. If good, process it.
1204 * If not, silently drop it and put it back on the 1180 * If not, silently drop it and put it back on the
1205 * receive ring. 1181 * receive ring.
1206 */ 1182 */
1207 1183
1208 if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) { 1184 if (!(dsc->dscr_a & M_DMA_ETHRX_BAD)) {
1209 1185
1210 /* 1186 /*
1211 * Add a new buffer to replace the old one. If we fail 1187 * Add a new buffer to replace the old one. If we fail
1212 * to allocate a buffer, we're going to drop this 1188 * to allocate a buffer, we're going to drop this
1213 * packet and put it right back on the receive ring. 1189 * packet and put it right back on the receive ring.
1214 */ 1190 */
1215 1191
1216 if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) { 1192 if (sbdma_add_rcvbuffer(d,NULL) == -ENOBUFS) {
1217 sc->sbm_stats.rx_dropped++; 1193 sc->sbm_stats.rx_dropped++;
1218 sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ 1194 sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
@@ -1221,7 +1197,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1221 * Set length into the packet 1197 * Set length into the packet
1222 */ 1198 */
1223 skb_put(sb,len); 1199 skb_put(sb,len);
1224 1200
1225 /* 1201 /*
1226 * Buffer has been replaced on the 1202 * Buffer has been replaced on the
1227 * receive ring. Pass the buffer to 1203 * receive ring. Pass the buffer to
@@ -1240,7 +1216,7 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1240 sb->ip_summed = CHECKSUM_NONE; 1216 sb->ip_summed = CHECKSUM_NONE;
1241 } 1217 }
1242 } 1218 }
1243 1219
1244 netif_rx(sb); 1220 netif_rx(sb);
1245 } 1221 }
1246 } else { 1222 } else {
@@ -1251,14 +1227,14 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1251 sc->sbm_stats.rx_errors++; 1227 sc->sbm_stats.rx_errors++;
1252 sbdma_add_rcvbuffer(d,sb); 1228 sbdma_add_rcvbuffer(d,sb);
1253 } 1229 }
1254 1230
1255 1231
1256 /* 1232 /*
1257 * .. and advance to the next buffer. 1233 * .. and advance to the next buffer.
1258 */ 1234 */
1259 1235
1260 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); 1236 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1261 1237
1262 } 1238 }
1263} 1239}
1264 1240
@@ -1266,17 +1242,17 @@ static void sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1266 1242
1267/********************************************************************** 1243/**********************************************************************
1268 * SBDMA_TX_PROCESS(sc,d) 1244 * SBDMA_TX_PROCESS(sc,d)
1269 * 1245 *
1270 * Process "completed" transmit buffers on the specified DMA channel. 1246 * Process "completed" transmit buffers on the specified DMA channel.
1271 * This is normally called within the interrupt service routine. 1247 * This is normally called within the interrupt service routine.
1272 * Note that this isn't really ideal for priority channels, since 1248 * Note that this isn't really ideal for priority channels, since
1273 * it processes all of the packets on a given channel before 1249 * it processes all of the packets on a given channel before
1274 * returning. 1250 * returning.
1275 * 1251 *
1276 * Input parameters: 1252 * Input parameters:
1277 * sc - softc structure 1253 * sc - softc structure
1278 * d - DMA channel context 1254 * d - DMA channel context
1279 * 1255 *
1280 * Return value: 1256 * Return value:
1281 * nothing 1257 * nothing
1282 ********************************************************************* */ 1258 ********************************************************************* */
@@ -1290,21 +1266,21 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1290 unsigned long flags; 1266 unsigned long flags;
1291 1267
1292 spin_lock_irqsave(&(sc->sbm_lock), flags); 1268 spin_lock_irqsave(&(sc->sbm_lock), flags);
1293 1269
1294 for (;;) { 1270 for (;;) {
1295 /* 1271 /*
1296 * figure out where we are (as an index) and where 1272 * figure out where we are (as an index) and where
1297 * the hardware is (also as an index) 1273 * the hardware is (also as an index)
1298 * 1274 *
1299 * This could be done faster if (for example) the 1275 * This could be done faster if (for example) the
1300 * descriptor table was page-aligned and contiguous in 1276 * descriptor table was page-aligned and contiguous in
1301 * both virtual and physical memory -- you could then 1277 * both virtual and physical memory -- you could then
1302 * just compare the low-order bits of the virtual address 1278 * just compare the low-order bits of the virtual address
1303 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR) 1279 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
1304 */ 1280 */
1305 1281
1306 curidx = d->sbdma_remptr - d->sbdma_dscrtable; 1282 curidx = d->sbdma_remptr - d->sbdma_dscrtable;
1307 hwidx = (int) (((SBMAC_READCSR(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) - 1283 hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1308 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t)); 1284 d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1309 1285
1310 /* 1286 /*
@@ -1312,75 +1288,75 @@ static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d)
1312 * of the descriptors up to (but not including) the one that 1288 * of the descriptors up to (but not including) the one that
1313 * the hardware is working on right now. 1289 * the hardware is working on right now.
1314 */ 1290 */
1315 1291
1316 if (curidx == hwidx) 1292 if (curidx == hwidx)
1317 break; 1293 break;
1318 1294
1319 /* 1295 /*
1320 * Otherwise, get the packet's sk_buff ptr back 1296 * Otherwise, get the packet's sk_buff ptr back
1321 */ 1297 */
1322 1298
1323 dsc = &(d->sbdma_dscrtable[curidx]); 1299 dsc = &(d->sbdma_dscrtable[curidx]);
1324 sb = d->sbdma_ctxtable[curidx]; 1300 sb = d->sbdma_ctxtable[curidx];
1325 d->sbdma_ctxtable[curidx] = NULL; 1301 d->sbdma_ctxtable[curidx] = NULL;
1326 1302
1327 /* 1303 /*
1328 * Stats 1304 * Stats
1329 */ 1305 */
1330 1306
1331 sc->sbm_stats.tx_bytes += sb->len; 1307 sc->sbm_stats.tx_bytes += sb->len;
1332 sc->sbm_stats.tx_packets++; 1308 sc->sbm_stats.tx_packets++;
1333 1309
1334 /* 1310 /*
1335 * for transmits, we just free buffers. 1311 * for transmits, we just free buffers.
1336 */ 1312 */
1337 1313
1338 dev_kfree_skb_irq(sb); 1314 dev_kfree_skb_irq(sb);
1339 1315
1340 /* 1316 /*
1341 * .. and advance to the next buffer. 1317 * .. and advance to the next buffer.
1342 */ 1318 */
1343 1319
1344 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); 1320 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1345 1321
1346 } 1322 }
1347 1323
1348 /* 1324 /*
1349 * Decide if we should wake up the protocol or not. 1325 * Decide if we should wake up the protocol or not.
1350 * Other drivers seem to do this when we reach a low 1326 * Other drivers seem to do this when we reach a low
1351 * watermark on the transmit queue. 1327 * watermark on the transmit queue.
1352 */ 1328 */
1353 1329
1354 netif_wake_queue(d->sbdma_eth->sbm_dev); 1330 netif_wake_queue(d->sbdma_eth->sbm_dev);
1355 1331
1356 spin_unlock_irqrestore(&(sc->sbm_lock), flags); 1332 spin_unlock_irqrestore(&(sc->sbm_lock), flags);
1357 1333
1358} 1334}
1359 1335
1360 1336
1361 1337
1362/********************************************************************** 1338/**********************************************************************
1363 * SBMAC_INITCTX(s) 1339 * SBMAC_INITCTX(s)
1364 * 1340 *
1365 * Initialize an Ethernet context structure - this is called 1341 * Initialize an Ethernet context structure - this is called
1366 * once per MAC on the 1250. Memory is allocated here, so don't 1342 * once per MAC on the 1250. Memory is allocated here, so don't
1367 * call it again from inside the ioctl routines that bring the 1343 * call it again from inside the ioctl routines that bring the
1368 * interface up/down 1344 * interface up/down
1369 * 1345 *
1370 * Input parameters: 1346 * Input parameters:
1371 * s - sbmac context structure 1347 * s - sbmac context structure
1372 * 1348 *
1373 * Return value: 1349 * Return value:
1374 * 0 1350 * 0
1375 ********************************************************************* */ 1351 ********************************************************************* */
1376 1352
1377static int sbmac_initctx(struct sbmac_softc *s) 1353static int sbmac_initctx(struct sbmac_softc *s)
1378{ 1354{
1379 1355
1380 /* 1356 /*
1381 * figure out the addresses of some ports 1357 * figure out the addresses of some ports
1382 */ 1358 */
1383 1359
1384 s->sbm_macenable = s->sbm_base + R_MAC_ENABLE; 1360 s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
1385 s->sbm_maccfg = s->sbm_base + R_MAC_CFG; 1361 s->sbm_maccfg = s->sbm_base + R_MAC_CFG;
1386 s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG; 1362 s->sbm_fifocfg = s->sbm_base + R_MAC_THRSH_CFG;
@@ -1397,29 +1373,29 @@ static int sbmac_initctx(struct sbmac_softc *s)
1397 s->sbm_phy_oldanlpar = 0; 1373 s->sbm_phy_oldanlpar = 0;
1398 s->sbm_phy_oldk1stsr = 0; 1374 s->sbm_phy_oldk1stsr = 0;
1399 s->sbm_phy_oldlinkstat = 0; 1375 s->sbm_phy_oldlinkstat = 0;
1400 1376
1401 /* 1377 /*
1402 * Initialize the DMA channels. Right now, only one per MAC is used 1378 * Initialize the DMA channels. Right now, only one per MAC is used
1403 * Note: Only do this _once_, as it allocates memory from the kernel! 1379 * Note: Only do this _once_, as it allocates memory from the kernel!
1404 */ 1380 */
1405 1381
1406 sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR); 1382 sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
1407 sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR); 1383 sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
1408 1384
1409 /* 1385 /*
1410 * initial state is OFF 1386 * initial state is OFF
1411 */ 1387 */
1412 1388
1413 s->sbm_state = sbmac_state_off; 1389 s->sbm_state = sbmac_state_off;
1414 1390
1415 /* 1391 /*
1416 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC 1392 * Initial speed is (XXX TEMP) 10MBit/s HDX no FC
1417 */ 1393 */
1418 1394
1419 s->sbm_speed = sbmac_speed_10; 1395 s->sbm_speed = sbmac_speed_10;
1420 s->sbm_duplex = sbmac_duplex_half; 1396 s->sbm_duplex = sbmac_duplex_half;
1421 s->sbm_fc = sbmac_fc_disabled; 1397 s->sbm_fc = sbmac_fc_disabled;
1422 1398
1423 return 0; 1399 return 0;
1424} 1400}
1425 1401
@@ -1430,7 +1406,7 @@ static void sbdma_uninitctx(struct sbmacdma_s *d)
1430 kfree(d->sbdma_dscrtable); 1406 kfree(d->sbdma_dscrtable);
1431 d->sbdma_dscrtable = NULL; 1407 d->sbdma_dscrtable = NULL;
1432 } 1408 }
1433 1409
1434 if (d->sbdma_ctxtable) { 1410 if (d->sbdma_ctxtable) {
1435 kfree(d->sbdma_ctxtable); 1411 kfree(d->sbdma_ctxtable);
1436 d->sbdma_ctxtable = NULL; 1412 d->sbdma_ctxtable = NULL;
@@ -1447,12 +1423,12 @@ static void sbmac_uninitctx(struct sbmac_softc *sc)
1447 1423
1448/********************************************************************** 1424/**********************************************************************
1449 * SBMAC_CHANNEL_START(s) 1425 * SBMAC_CHANNEL_START(s)
1450 * 1426 *
1451 * Start packet processing on this MAC. 1427 * Start packet processing on this MAC.
1452 * 1428 *
1453 * Input parameters: 1429 * Input parameters:
1454 * s - sbmac structure 1430 * s - sbmac structure
1455 * 1431 *
1456 * Return value: 1432 * Return value:
1457 * nothing 1433 * nothing
1458 ********************************************************************* */ 1434 ********************************************************************* */
@@ -1460,49 +1436,49 @@ static void sbmac_uninitctx(struct sbmac_softc *sc)
1460static void sbmac_channel_start(struct sbmac_softc *s) 1436static void sbmac_channel_start(struct sbmac_softc *s)
1461{ 1437{
1462 uint64_t reg; 1438 uint64_t reg;
1463 sbmac_port_t port; 1439 volatile void __iomem *port;
1464 uint64_t cfg,fifo,framecfg; 1440 uint64_t cfg,fifo,framecfg;
1465 int idx, th_value; 1441 int idx, th_value;
1466 1442
1467 /* 1443 /*
1468 * Don't do this if running 1444 * Don't do this if running
1469 */ 1445 */
1470 1446
1471 if (s->sbm_state == sbmac_state_on) 1447 if (s->sbm_state == sbmac_state_on)
1472 return; 1448 return;
1473 1449
1474 /* 1450 /*
1475 * Bring the controller out of reset, but leave it off. 1451 * Bring the controller out of reset, but leave it off.
1476 */ 1452 */
1477 1453
1478 SBMAC_WRITECSR(s->sbm_macenable,0); 1454 __raw_writeq(0, s->sbm_macenable);
1479 1455
1480 /* 1456 /*
1481 * Ignore all received packets 1457 * Ignore all received packets
1482 */ 1458 */
1483 1459
1484 SBMAC_WRITECSR(s->sbm_rxfilter,0); 1460 __raw_writeq(0, s->sbm_rxfilter);
1485 1461
1486 /* 1462 /*
1487 * Calculate values for various control registers. 1463 * Calculate values for various control registers.
1488 */ 1464 */
1489 1465
1490 cfg = M_MAC_RETRY_EN | 1466 cfg = M_MAC_RETRY_EN |
1491 M_MAC_TX_HOLD_SOP_EN | 1467 M_MAC_TX_HOLD_SOP_EN |
1492 V_MAC_TX_PAUSE_CNT_16K | 1468 V_MAC_TX_PAUSE_CNT_16K |
1493 M_MAC_AP_STAT_EN | 1469 M_MAC_AP_STAT_EN |
1494 M_MAC_FAST_SYNC | 1470 M_MAC_FAST_SYNC |
1495 M_MAC_SS_EN | 1471 M_MAC_SS_EN |
1496 0; 1472 0;
1497 1473
1498 /* 1474 /*
1499 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars 1475 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 pars
1500 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above 1476 * and make sure that RD_THRSH + WR_THRSH <=128 for pass2 and above
1501 * Use a larger RD_THRSH for gigabit 1477 * Use a larger RD_THRSH for gigabit
1502 */ 1478 */
1503 if (periph_rev >= 2) 1479 if (periph_rev >= 2)
1504 th_value = 64; 1480 th_value = 64;
1505 else 1481 else
1506 th_value = 28; 1482 th_value = 28;
1507 1483
1508 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */ 1484 fifo = V_MAC_TX_WR_THRSH(4) | /* Must be '4' or '8' */
@@ -1520,51 +1496,51 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1520 V_MAC_BACKOFF_SEL(1); 1496 V_MAC_BACKOFF_SEL(1);
1521 1497
1522 /* 1498 /*
1523 * Clear out the hash address map 1499 * Clear out the hash address map
1524 */ 1500 */
1525 1501
1526 port = s->sbm_base + R_MAC_HASH_BASE; 1502 port = s->sbm_base + R_MAC_HASH_BASE;
1527 for (idx = 0; idx < MAC_HASH_COUNT; idx++) { 1503 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1528 SBMAC_WRITECSR(port,0); 1504 __raw_writeq(0, port);
1529 port += sizeof(uint64_t); 1505 port += sizeof(uint64_t);
1530 } 1506 }
1531 1507
1532 /* 1508 /*
1533 * Clear out the exact-match table 1509 * Clear out the exact-match table
1534 */ 1510 */
1535 1511
1536 port = s->sbm_base + R_MAC_ADDR_BASE; 1512 port = s->sbm_base + R_MAC_ADDR_BASE;
1537 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) { 1513 for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1538 SBMAC_WRITECSR(port,0); 1514 __raw_writeq(0, port);
1539 port += sizeof(uint64_t); 1515 port += sizeof(uint64_t);
1540 } 1516 }
1541 1517
1542 /* 1518 /*
1543 * Clear out the DMA Channel mapping table registers 1519 * Clear out the DMA Channel mapping table registers
1544 */ 1520 */
1545 1521
1546 port = s->sbm_base + R_MAC_CHUP0_BASE; 1522 port = s->sbm_base + R_MAC_CHUP0_BASE;
1547 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { 1523 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1548 SBMAC_WRITECSR(port,0); 1524 __raw_writeq(0, port);
1549 port += sizeof(uint64_t); 1525 port += sizeof(uint64_t);
1550 } 1526 }
1551 1527
1552 1528
1553 port = s->sbm_base + R_MAC_CHLO0_BASE; 1529 port = s->sbm_base + R_MAC_CHLO0_BASE;
1554 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) { 1530 for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1555 SBMAC_WRITECSR(port,0); 1531 __raw_writeq(0, port);
1556 port += sizeof(uint64_t); 1532 port += sizeof(uint64_t);
1557 } 1533 }
1558 1534
1559 /* 1535 /*
1560 * Program the hardware address. It goes into the hardware-address 1536 * Program the hardware address. It goes into the hardware-address
1561 * register as well as the first filter register. 1537 * register as well as the first filter register.
1562 */ 1538 */
1563 1539
1564 reg = sbmac_addr2reg(s->sbm_hwaddr); 1540 reg = sbmac_addr2reg(s->sbm_hwaddr);
1565 1541
1566 port = s->sbm_base + R_MAC_ADDR_BASE; 1542 port = s->sbm_base + R_MAC_ADDR_BASE;
1567 SBMAC_WRITECSR(port,reg); 1543 __raw_writeq(reg, port);
1568 port = s->sbm_base + R_MAC_ETHERNET_ADDR; 1544 port = s->sbm_base + R_MAC_ETHERNET_ADDR;
1569 1545
1570#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS 1546#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
@@ -1573,108 +1549,105 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1573 * destination address in the R_MAC_ETHERNET_ADDR register. 1549 * destination address in the R_MAC_ETHERNET_ADDR register.
1574 * Set the value to zero. 1550 * Set the value to zero.
1575 */ 1551 */
1576 SBMAC_WRITECSR(port,0); 1552 __raw_writeq(0, port);
1577#else 1553#else
1578 SBMAC_WRITECSR(port,reg); 1554 __raw_writeq(reg, port);
1579#endif 1555#endif
1580 1556
1581 /* 1557 /*
1582 * Set the receive filter for no packets, and write values 1558 * Set the receive filter for no packets, and write values
1583 * to the various config registers 1559 * to the various config registers
1584 */ 1560 */
1585 1561
1586 SBMAC_WRITECSR(s->sbm_rxfilter,0); 1562 __raw_writeq(0, s->sbm_rxfilter);
1587 SBMAC_WRITECSR(s->sbm_imr,0); 1563 __raw_writeq(0, s->sbm_imr);
1588 SBMAC_WRITECSR(s->sbm_framecfg,framecfg); 1564 __raw_writeq(framecfg, s->sbm_framecfg);
1589 SBMAC_WRITECSR(s->sbm_fifocfg,fifo); 1565 __raw_writeq(fifo, s->sbm_fifocfg);
1590 SBMAC_WRITECSR(s->sbm_maccfg,cfg); 1566 __raw_writeq(cfg, s->sbm_maccfg);
1591 1567
1592 /* 1568 /*
1593 * Initialize DMA channels (rings should be ok now) 1569 * Initialize DMA channels (rings should be ok now)
1594 */ 1570 */
1595 1571
1596 sbdma_channel_start(&(s->sbm_rxdma), DMA_RX); 1572 sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
1597 sbdma_channel_start(&(s->sbm_txdma), DMA_TX); 1573 sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
1598 1574
1599 /* 1575 /*
1600 * Configure the speed, duplex, and flow control 1576 * Configure the speed, duplex, and flow control
1601 */ 1577 */
1602 1578
1603 sbmac_set_speed(s,s->sbm_speed); 1579 sbmac_set_speed(s,s->sbm_speed);
1604 sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc); 1580 sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
1605 1581
1606 /* 1582 /*
1607 * Fill the receive ring 1583 * Fill the receive ring
1608 */ 1584 */
1609 1585
1610 sbdma_fillring(&(s->sbm_rxdma)); 1586 sbdma_fillring(&(s->sbm_rxdma));
1611 1587
1612 /* 1588 /*
1613 * Turn on the rest of the bits in the enable register 1589 * Turn on the rest of the bits in the enable register
1614 */ 1590 */
1615 1591
1616 SBMAC_WRITECSR(s->sbm_macenable, 1592 __raw_writeq(M_MAC_RXDMA_EN0 |
1617 M_MAC_RXDMA_EN0 |
1618 M_MAC_TXDMA_EN0 | 1593 M_MAC_TXDMA_EN0 |
1619 M_MAC_RX_ENABLE | 1594 M_MAC_RX_ENABLE |
1620 M_MAC_TX_ENABLE); 1595 M_MAC_TX_ENABLE, s->sbm_macenable);
1621 1596
1622 1597
1623 1598
1624 1599
1625#ifdef CONFIG_SBMAC_COALESCE 1600#ifdef CONFIG_SBMAC_COALESCE
1626 /* 1601 /*
1627 * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0 1602 * Accept any TX interrupt and EOP count/timer RX interrupts on ch 0
1628 */ 1603 */
1629 SBMAC_WRITECSR(s->sbm_imr, 1604 __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
1630 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) | 1605 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
1631 ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0));
1632#else 1606#else
1633 /* 1607 /*
1634 * Accept any kind of interrupt on TX and RX DMA channel 0 1608 * Accept any kind of interrupt on TX and RX DMA channel 0
1635 */ 1609 */
1636 SBMAC_WRITECSR(s->sbm_imr, 1610 __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1637 (M_MAC_INT_CHANNEL << S_MAC_TX_CH0) | 1611 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
1638 (M_MAC_INT_CHANNEL << S_MAC_RX_CH0));
1639#endif 1612#endif
1640 1613
1641 /* 1614 /*
1642 * Enable receiving unicasts and broadcasts 1615 * Enable receiving unicasts and broadcasts
1643 */ 1616 */
1644 1617
1645 SBMAC_WRITECSR(s->sbm_rxfilter,M_MAC_UCAST_EN | M_MAC_BCAST_EN); 1618 __raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
1646 1619
1647 /* 1620 /*
1648 * we're running now. 1621 * we're running now.
1649 */ 1622 */
1650 1623
1651 s->sbm_state = sbmac_state_on; 1624 s->sbm_state = sbmac_state_on;
1652 1625
1653 /* 1626 /*
1654 * Program multicast addresses 1627 * Program multicast addresses
1655 */ 1628 */
1656 1629
1657 sbmac_setmulti(s); 1630 sbmac_setmulti(s);
1658 1631
1659 /* 1632 /*
1660 * If channel was in promiscuous mode before, turn that on 1633 * If channel was in promiscuous mode before, turn that on
1661 */ 1634 */
1662 1635
1663 if (s->sbm_devflags & IFF_PROMISC) { 1636 if (s->sbm_devflags & IFF_PROMISC) {
1664 sbmac_promiscuous_mode(s,1); 1637 sbmac_promiscuous_mode(s,1);
1665 } 1638 }
1666 1639
1667} 1640}
1668 1641
1669 1642
1670/********************************************************************** 1643/**********************************************************************
1671 * SBMAC_CHANNEL_STOP(s) 1644 * SBMAC_CHANNEL_STOP(s)
1672 * 1645 *
1673 * Stop packet processing on this MAC. 1646 * Stop packet processing on this MAC.
1674 * 1647 *
1675 * Input parameters: 1648 * Input parameters:
1676 * s - sbmac structure 1649 * s - sbmac structure
1677 * 1650 *
1678 * Return value: 1651 * Return value:
1679 * nothing 1652 * nothing
1680 ********************************************************************* */ 1653 ********************************************************************* */
@@ -1682,49 +1655,49 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1682static void sbmac_channel_stop(struct sbmac_softc *s) 1655static void sbmac_channel_stop(struct sbmac_softc *s)
1683{ 1656{
1684 /* don't do this if already stopped */ 1657 /* don't do this if already stopped */
1685 1658
1686 if (s->sbm_state == sbmac_state_off) 1659 if (s->sbm_state == sbmac_state_off)
1687 return; 1660 return;
1688 1661
1689 /* don't accept any packets, disable all interrupts */ 1662 /* don't accept any packets, disable all interrupts */
1690 1663
1691 SBMAC_WRITECSR(s->sbm_rxfilter,0); 1664 __raw_writeq(0, s->sbm_rxfilter);
1692 SBMAC_WRITECSR(s->sbm_imr,0); 1665 __raw_writeq(0, s->sbm_imr);
1693 1666
1694 /* Turn off ticker */ 1667 /* Turn off ticker */
1695 1668
1696 /* XXX */ 1669 /* XXX */
1697 1670
1698 /* turn off receiver and transmitter */ 1671 /* turn off receiver and transmitter */
1699 1672
1700 SBMAC_WRITECSR(s->sbm_macenable,0); 1673 __raw_writeq(0, s->sbm_macenable);
1701 1674
1702 /* We're stopped now. */ 1675 /* We're stopped now. */
1703 1676
1704 s->sbm_state = sbmac_state_off; 1677 s->sbm_state = sbmac_state_off;
1705 1678
1706 /* 1679 /*
1707 * Stop DMA channels (rings should be ok now) 1680 * Stop DMA channels (rings should be ok now)
1708 */ 1681 */
1709 1682
1710 sbdma_channel_stop(&(s->sbm_rxdma)); 1683 sbdma_channel_stop(&(s->sbm_rxdma));
1711 sbdma_channel_stop(&(s->sbm_txdma)); 1684 sbdma_channel_stop(&(s->sbm_txdma));
1712 1685
1713 /* Empty the receive and transmit rings */ 1686 /* Empty the receive and transmit rings */
1714 1687
1715 sbdma_emptyring(&(s->sbm_rxdma)); 1688 sbdma_emptyring(&(s->sbm_rxdma));
1716 sbdma_emptyring(&(s->sbm_txdma)); 1689 sbdma_emptyring(&(s->sbm_txdma));
1717 1690
1718} 1691}
1719 1692
1720/********************************************************************** 1693/**********************************************************************
1721 * SBMAC_SET_CHANNEL_STATE(state) 1694 * SBMAC_SET_CHANNEL_STATE(state)
1722 * 1695 *
1723 * Set the channel's state ON or OFF 1696 * Set the channel's state ON or OFF
1724 * 1697 *
1725 * Input parameters: 1698 * Input parameters:
1726 * state - new state 1699 * state - new state
1727 * 1700 *
1728 * Return value: 1701 * Return value:
1729 * old state 1702 * old state
1730 ********************************************************************* */ 1703 ********************************************************************* */
@@ -1732,43 +1705,43 @@ static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
1732 sbmac_state_t state) 1705 sbmac_state_t state)
1733{ 1706{
1734 sbmac_state_t oldstate = sc->sbm_state; 1707 sbmac_state_t oldstate = sc->sbm_state;
1735 1708
1736 /* 1709 /*
1737 * If same as previous state, return 1710 * If same as previous state, return
1738 */ 1711 */
1739 1712
1740 if (state == oldstate) { 1713 if (state == oldstate) {
1741 return oldstate; 1714 return oldstate;
1742 } 1715 }
1743 1716
1744 /* 1717 /*
1745 * If new state is ON, turn channel on 1718 * If new state is ON, turn channel on
1746 */ 1719 */
1747 1720
1748 if (state == sbmac_state_on) { 1721 if (state == sbmac_state_on) {
1749 sbmac_channel_start(sc); 1722 sbmac_channel_start(sc);
1750 } 1723 }
1751 else { 1724 else {
1752 sbmac_channel_stop(sc); 1725 sbmac_channel_stop(sc);
1753 } 1726 }
1754 1727
1755 /* 1728 /*
1756 * Return previous state 1729 * Return previous state
1757 */ 1730 */
1758 1731
1759 return oldstate; 1732 return oldstate;
1760} 1733}
1761 1734
1762 1735
1763/********************************************************************** 1736/**********************************************************************
1764 * SBMAC_PROMISCUOUS_MODE(sc,onoff) 1737 * SBMAC_PROMISCUOUS_MODE(sc,onoff)
1765 * 1738 *
1766 * Turn on or off promiscuous mode 1739 * Turn on or off promiscuous mode
1767 * 1740 *
1768 * Input parameters: 1741 * Input parameters:
1769 * sc - softc 1742 * sc - softc
1770 * onoff - 1 to turn on, 0 to turn off 1743 * onoff - 1 to turn on, 0 to turn off
1771 * 1744 *
1772 * Return value: 1745 * Return value:
1773 * nothing 1746 * nothing
1774 ********************************************************************* */ 1747 ********************************************************************* */
@@ -1776,30 +1749,30 @@ static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
1776static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff) 1749static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
1777{ 1750{
1778 uint64_t reg; 1751 uint64_t reg;
1779 1752
1780 if (sc->sbm_state != sbmac_state_on) 1753 if (sc->sbm_state != sbmac_state_on)
1781 return; 1754 return;
1782 1755
1783 if (onoff) { 1756 if (onoff) {
1784 reg = SBMAC_READCSR(sc->sbm_rxfilter); 1757 reg = __raw_readq(sc->sbm_rxfilter);
1785 reg |= M_MAC_ALLPKT_EN; 1758 reg |= M_MAC_ALLPKT_EN;
1786 SBMAC_WRITECSR(sc->sbm_rxfilter,reg); 1759 __raw_writeq(reg, sc->sbm_rxfilter);
1787 } 1760 }
1788 else { 1761 else {
1789 reg = SBMAC_READCSR(sc->sbm_rxfilter); 1762 reg = __raw_readq(sc->sbm_rxfilter);
1790 reg &= ~M_MAC_ALLPKT_EN; 1763 reg &= ~M_MAC_ALLPKT_EN;
1791 SBMAC_WRITECSR(sc->sbm_rxfilter,reg); 1764 __raw_writeq(reg, sc->sbm_rxfilter);
1792 } 1765 }
1793} 1766}
1794 1767
1795/********************************************************************** 1768/**********************************************************************
1796 * SBMAC_SETIPHDR_OFFSET(sc,onoff) 1769 * SBMAC_SETIPHDR_OFFSET(sc,onoff)
1797 * 1770 *
1798 * Set the iphdr offset as 15 assuming ethernet encapsulation 1771 * Set the iphdr offset as 15 assuming ethernet encapsulation
1799 * 1772 *
1800 * Input parameters: 1773 * Input parameters:
1801 * sc - softc 1774 * sc - softc
1802 * 1775 *
1803 * Return value: 1776 * Return value:
1804 * nothing 1777 * nothing
1805 ********************************************************************* */ 1778 ********************************************************************* */
@@ -1807,12 +1780,12 @@ static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
1807static void sbmac_set_iphdr_offset(struct sbmac_softc *sc) 1780static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
1808{ 1781{
1809 uint64_t reg; 1782 uint64_t reg;
1810 1783
1811 /* Hard code the off set to 15 for now */ 1784 /* Hard code the off set to 15 for now */
1812 reg = SBMAC_READCSR(sc->sbm_rxfilter); 1785 reg = __raw_readq(sc->sbm_rxfilter);
1813 reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15); 1786 reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
1814 SBMAC_WRITECSR(sc->sbm_rxfilter,reg); 1787 __raw_writeq(reg, sc->sbm_rxfilter);
1815 1788
1816 /* read system identification to determine revision */ 1789 /* read system identification to determine revision */
1817 if (periph_rev >= 2) { 1790 if (periph_rev >= 2) {
1818 sc->rx_hw_checksum = ENABLE; 1791 sc->rx_hw_checksum = ENABLE;
@@ -1824,13 +1797,13 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
1824 1797
1825/********************************************************************** 1798/**********************************************************************
1826 * SBMAC_ADDR2REG(ptr) 1799 * SBMAC_ADDR2REG(ptr)
1827 * 1800 *
1828 * Convert six bytes into the 64-bit register value that 1801 * Convert six bytes into the 64-bit register value that
1829 * we typically write into the SBMAC's address/mcast registers 1802 * we typically write into the SBMAC's address/mcast registers
1830 * 1803 *
1831 * Input parameters: 1804 * Input parameters:
1832 * ptr - pointer to 6 bytes 1805 * ptr - pointer to 6 bytes
1833 * 1806 *
1834 * Return value: 1807 * Return value:
1835 * register value 1808 * register value
1836 ********************************************************************* */ 1809 ********************************************************************* */
@@ -1838,35 +1811,35 @@ static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
1838static uint64_t sbmac_addr2reg(unsigned char *ptr) 1811static uint64_t sbmac_addr2reg(unsigned char *ptr)
1839{ 1812{
1840 uint64_t reg = 0; 1813 uint64_t reg = 0;
1841 1814
1842 ptr += 6; 1815 ptr += 6;
1843 1816
1844 reg |= (uint64_t) *(--ptr); 1817 reg |= (uint64_t) *(--ptr);
1845 reg <<= 8; 1818 reg <<= 8;
1846 reg |= (uint64_t) *(--ptr); 1819 reg |= (uint64_t) *(--ptr);
1847 reg <<= 8; 1820 reg <<= 8;
1848 reg |= (uint64_t) *(--ptr); 1821 reg |= (uint64_t) *(--ptr);
1849 reg <<= 8; 1822 reg <<= 8;
1850 reg |= (uint64_t) *(--ptr); 1823 reg |= (uint64_t) *(--ptr);
1851 reg <<= 8; 1824 reg <<= 8;
1852 reg |= (uint64_t) *(--ptr); 1825 reg |= (uint64_t) *(--ptr);
1853 reg <<= 8; 1826 reg <<= 8;
1854 reg |= (uint64_t) *(--ptr); 1827 reg |= (uint64_t) *(--ptr);
1855 1828
1856 return reg; 1829 return reg;
1857} 1830}
1858 1831
1859 1832
1860/********************************************************************** 1833/**********************************************************************
1861 * SBMAC_SET_SPEED(s,speed) 1834 * SBMAC_SET_SPEED(s,speed)
1862 * 1835 *
1863 * Configure LAN speed for the specified MAC. 1836 * Configure LAN speed for the specified MAC.
1864 * Warning: must be called when MAC is off! 1837 * Warning: must be called when MAC is off!
1865 * 1838 *
1866 * Input parameters: 1839 * Input parameters:
1867 * s - sbmac structure 1840 * s - sbmac structure
1868 * speed - speed to set MAC to (see sbmac_speed_t enum) 1841 * speed - speed to set MAC to (see sbmac_speed_t enum)
1869 * 1842 *
1870 * Return value: 1843 * Return value:
1871 * 1 if successful 1844 * 1 if successful
1872 * 0 indicates invalid parameters 1845 * 0 indicates invalid parameters
@@ -1880,31 +1853,31 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
1880 /* 1853 /*
1881 * Save new current values 1854 * Save new current values
1882 */ 1855 */
1883 1856
1884 s->sbm_speed = speed; 1857 s->sbm_speed = speed;
1885 1858
1886 if (s->sbm_state == sbmac_state_on) 1859 if (s->sbm_state == sbmac_state_on)
1887 return 0; /* save for next restart */ 1860 return 0; /* save for next restart */
1888 1861
1889 /* 1862 /*
1890 * Read current register values 1863 * Read current register values
1891 */ 1864 */
1892 1865
1893 cfg = SBMAC_READCSR(s->sbm_maccfg); 1866 cfg = __raw_readq(s->sbm_maccfg);
1894 framecfg = SBMAC_READCSR(s->sbm_framecfg); 1867 framecfg = __raw_readq(s->sbm_framecfg);
1895 1868
1896 /* 1869 /*
1897 * Mask out the stuff we want to change 1870 * Mask out the stuff we want to change
1898 */ 1871 */
1899 1872
1900 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL); 1873 cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1901 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH | 1874 framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1902 M_MAC_SLOT_SIZE); 1875 M_MAC_SLOT_SIZE);
1903 1876
1904 /* 1877 /*
1905 * Now add in the new bits 1878 * Now add in the new bits
1906 */ 1879 */
1907 1880
1908 switch (speed) { 1881 switch (speed) {
1909 case sbmac_speed_10: 1882 case sbmac_speed_10:
1910 framecfg |= V_MAC_IFG_RX_10 | 1883 framecfg |= V_MAC_IFG_RX_10 |
@@ -1913,7 +1886,7 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
1913 V_MAC_SLOT_SIZE_10; 1886 V_MAC_SLOT_SIZE_10;
1914 cfg |= V_MAC_SPEED_SEL_10MBPS; 1887 cfg |= V_MAC_SPEED_SEL_10MBPS;
1915 break; 1888 break;
1916 1889
1917 case sbmac_speed_100: 1890 case sbmac_speed_100:
1918 framecfg |= V_MAC_IFG_RX_100 | 1891 framecfg |= V_MAC_IFG_RX_100 |
1919 V_MAC_IFG_TX_100 | 1892 V_MAC_IFG_TX_100 |
@@ -1921,7 +1894,7 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
1921 V_MAC_SLOT_SIZE_100; 1894 V_MAC_SLOT_SIZE_100;
1922 cfg |= V_MAC_SPEED_SEL_100MBPS ; 1895 cfg |= V_MAC_SPEED_SEL_100MBPS ;
1923 break; 1896 break;
1924 1897
1925 case sbmac_speed_1000: 1898 case sbmac_speed_1000:
1926 framecfg |= V_MAC_IFG_RX_1000 | 1899 framecfg |= V_MAC_IFG_RX_1000 |
1927 V_MAC_IFG_TX_1000 | 1900 V_MAC_IFG_TX_1000 |
@@ -1929,34 +1902,34 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
1929 V_MAC_SLOT_SIZE_1000; 1902 V_MAC_SLOT_SIZE_1000;
1930 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN; 1903 cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1931 break; 1904 break;
1932 1905
1933 case sbmac_speed_auto: /* XXX not implemented */ 1906 case sbmac_speed_auto: /* XXX not implemented */
1934 /* fall through */ 1907 /* fall through */
1935 default: 1908 default:
1936 return 0; 1909 return 0;
1937 } 1910 }
1938 1911
1939 /* 1912 /*
1940 * Send the bits back to the hardware 1913 * Send the bits back to the hardware
1941 */ 1914 */
1942 1915
1943 SBMAC_WRITECSR(s->sbm_framecfg,framecfg); 1916 __raw_writeq(framecfg, s->sbm_framecfg);
1944 SBMAC_WRITECSR(s->sbm_maccfg,cfg); 1917 __raw_writeq(cfg, s->sbm_maccfg);
1945 1918
1946 return 1; 1919 return 1;
1947} 1920}
1948 1921
1949/********************************************************************** 1922/**********************************************************************
1950 * SBMAC_SET_DUPLEX(s,duplex,fc) 1923 * SBMAC_SET_DUPLEX(s,duplex,fc)
1951 * 1924 *
1952 * Set Ethernet duplex and flow control options for this MAC 1925 * Set Ethernet duplex and flow control options for this MAC
1953 * Warning: must be called when MAC is off! 1926 * Warning: must be called when MAC is off!
1954 * 1927 *
1955 * Input parameters: 1928 * Input parameters:
1956 * s - sbmac structure 1929 * s - sbmac structure
1957 * duplex - duplex setting (see sbmac_duplex_t) 1930 * duplex - duplex setting (see sbmac_duplex_t)
1958 * fc - flow control setting (see sbmac_fc_t) 1931 * fc - flow control setting (see sbmac_fc_t)
1959 * 1932 *
1960 * Return value: 1933 * Return value:
1961 * 1 if ok 1934 * 1 if ok
1962 * 0 if an invalid parameter combination was specified 1935 * 0 if an invalid parameter combination was specified
@@ -1965,67 +1938,67 @@ static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
1965static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc) 1938static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc)
1966{ 1939{
1967 uint64_t cfg; 1940 uint64_t cfg;
1968 1941
1969 /* 1942 /*
1970 * Save new current values 1943 * Save new current values
1971 */ 1944 */
1972 1945
1973 s->sbm_duplex = duplex; 1946 s->sbm_duplex = duplex;
1974 s->sbm_fc = fc; 1947 s->sbm_fc = fc;
1975 1948
1976 if (s->sbm_state == sbmac_state_on) 1949 if (s->sbm_state == sbmac_state_on)
1977 return 0; /* save for next restart */ 1950 return 0; /* save for next restart */
1978 1951
1979 /* 1952 /*
1980 * Read current register values 1953 * Read current register values
1981 */ 1954 */
1982 1955
1983 cfg = SBMAC_READCSR(s->sbm_maccfg); 1956 cfg = __raw_readq(s->sbm_maccfg);
1984 1957
1985 /* 1958 /*
1986 * Mask off the stuff we're about to change 1959 * Mask off the stuff we're about to change
1987 */ 1960 */
1988 1961
1989 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN); 1962 cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
1990 1963
1991 1964
1992 switch (duplex) { 1965 switch (duplex) {
1993 case sbmac_duplex_half: 1966 case sbmac_duplex_half:
1994 switch (fc) { 1967 switch (fc) {
1995 case sbmac_fc_disabled: 1968 case sbmac_fc_disabled:
1996 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED; 1969 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
1997 break; 1970 break;
1998 1971
1999 case sbmac_fc_collision: 1972 case sbmac_fc_collision:
2000 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED; 1973 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
2001 break; 1974 break;
2002 1975
2003 case sbmac_fc_carrier: 1976 case sbmac_fc_carrier:
2004 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR; 1977 cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
2005 break; 1978 break;
2006 1979
2007 case sbmac_fc_auto: /* XXX not implemented */ 1980 case sbmac_fc_auto: /* XXX not implemented */
2008 /* fall through */ 1981 /* fall through */
2009 case sbmac_fc_frame: /* not valid in half duplex */ 1982 case sbmac_fc_frame: /* not valid in half duplex */
2010 default: /* invalid selection */ 1983 default: /* invalid selection */
2011 return 0; 1984 return 0;
2012 } 1985 }
2013 break; 1986 break;
2014 1987
2015 case sbmac_duplex_full: 1988 case sbmac_duplex_full:
2016 switch (fc) { 1989 switch (fc) {
2017 case sbmac_fc_disabled: 1990 case sbmac_fc_disabled:
2018 cfg |= V_MAC_FC_CMD_DISABLED; 1991 cfg |= V_MAC_FC_CMD_DISABLED;
2019 break; 1992 break;
2020 1993
2021 case sbmac_fc_frame: 1994 case sbmac_fc_frame:
2022 cfg |= V_MAC_FC_CMD_ENABLED; 1995 cfg |= V_MAC_FC_CMD_ENABLED;
2023 break; 1996 break;
2024 1997
2025 case sbmac_fc_collision: /* not valid in full duplex */ 1998 case sbmac_fc_collision: /* not valid in full duplex */
2026 case sbmac_fc_carrier: /* not valid in full duplex */ 1999 case sbmac_fc_carrier: /* not valid in full duplex */
2027 case sbmac_fc_auto: /* XXX not implemented */ 2000 case sbmac_fc_auto: /* XXX not implemented */
2028 /* fall through */ 2001 /* fall through */
2029 default: 2002 default:
2030 return 0; 2003 return 0;
2031 } 2004 }
@@ -2034,13 +2007,13 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc
2034 /* XXX not implemented */ 2007 /* XXX not implemented */
2035 break; 2008 break;
2036 } 2009 }
2037 2010
2038 /* 2011 /*
2039 * Send the bits back to the hardware 2012 * Send the bits back to the hardware
2040 */ 2013 */
2041 2014
2042 SBMAC_WRITECSR(s->sbm_maccfg,cfg); 2015 __raw_writeq(cfg, s->sbm_maccfg);
2043 2016
2044 return 1; 2017 return 1;
2045} 2018}
2046 2019
@@ -2049,12 +2022,12 @@ static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc
2049 2022
2050/********************************************************************** 2023/**********************************************************************
2051 * SBMAC_INTR() 2024 * SBMAC_INTR()
2052 * 2025 *
2053 * Interrupt handler for MAC interrupts 2026 * Interrupt handler for MAC interrupts
2054 * 2027 *
2055 * Input parameters: 2028 * Input parameters:
2056 * MAC structure 2029 * MAC structure
2057 * 2030 *
2058 * Return value: 2031 * Return value:
2059 * nothing 2032 * nothing
2060 ********************************************************************* */ 2033 ********************************************************************* */
@@ -2066,27 +2039,27 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
2066 int handled = 0; 2039 int handled = 0;
2067 2040
2068 for (;;) { 2041 for (;;) {
2069 2042
2070 /* 2043 /*
2071 * Read the ISR (this clears the bits in the real 2044 * Read the ISR (this clears the bits in the real
2072 * register, except for counter addr) 2045 * register, except for counter addr)
2073 */ 2046 */
2074 2047
2075 isr = SBMAC_READCSR(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR; 2048 isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
2076 2049
2077 if (isr == 0) 2050 if (isr == 0)
2078 break; 2051 break;
2079 2052
2080 handled = 1; 2053 handled = 1;
2081 2054
2082 /* 2055 /*
2083 * Transmits on channel 0 2056 * Transmits on channel 0
2084 */ 2057 */
2085 2058
2086 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) { 2059 if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
2087 sbdma_tx_process(sc,&(sc->sbm_txdma)); 2060 sbdma_tx_process(sc,&(sc->sbm_txdma));
2088 } 2061 }
2089 2062
2090 /* 2063 /*
2091 * Receives on channel 0 2064 * Receives on channel 0
2092 */ 2065 */
@@ -2106,8 +2079,8 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
2106 * EOP_SEEN here takes care of this case. 2079 * EOP_SEEN here takes care of this case.
2107 * (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0) 2080 * (EOP_SEEN is part of M_MAC_INT_CHANNEL << S_MAC_RX_CH0)
2108 */ 2081 */
2109 2082
2110 2083
2111 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) { 2084 if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
2112 sbdma_rx_process(sc,&(sc->sbm_rxdma)); 2085 sbdma_rx_process(sc,&(sc->sbm_rxdma));
2113 } 2086 }
@@ -2118,29 +2091,29 @@ static irqreturn_t sbmac_intr(int irq,void *dev_instance,struct pt_regs *rgs)
2118 2091
2119/********************************************************************** 2092/**********************************************************************
2120 * SBMAC_START_TX(skb,dev) 2093 * SBMAC_START_TX(skb,dev)
2121 * 2094 *
2122 * Start output on the specified interface. Basically, we 2095 * Start output on the specified interface. Basically, we
2123 * queue as many buffers as we can until the ring fills up, or 2096 * queue as many buffers as we can until the ring fills up, or
2124 * we run off the end of the queue, whichever comes first. 2097 * we run off the end of the queue, whichever comes first.
2125 * 2098 *
2126 * Input parameters: 2099 * Input parameters:
2127 * 2100 *
2128 * 2101 *
2129 * Return value: 2102 * Return value:
2130 * nothing 2103 * nothing
2131 ********************************************************************* */ 2104 ********************************************************************* */
2132static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev) 2105static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2133{ 2106{
2134 struct sbmac_softc *sc = netdev_priv(dev); 2107 struct sbmac_softc *sc = netdev_priv(dev);
2135 2108
2136 /* lock eth irq */ 2109 /* lock eth irq */
2137 spin_lock_irq (&sc->sbm_lock); 2110 spin_lock_irq (&sc->sbm_lock);
2138 2111
2139 /* 2112 /*
2140 * Put the buffer on the transmit ring. If we 2113 * Put the buffer on the transmit ring. If we
2141 * don't have room, stop the queue. 2114 * don't have room, stop the queue.
2142 */ 2115 */
2143 2116
2144 if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) { 2117 if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
2145 /* XXX save skb that we could not send */ 2118 /* XXX save skb that we could not send */
2146 netif_stop_queue(dev); 2119 netif_stop_queue(dev);
@@ -2148,24 +2121,24 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2148 2121
2149 return 1; 2122 return 1;
2150 } 2123 }
2151 2124
2152 dev->trans_start = jiffies; 2125 dev->trans_start = jiffies;
2153 2126
2154 spin_unlock_irq (&sc->sbm_lock); 2127 spin_unlock_irq (&sc->sbm_lock);
2155 2128
2156 return 0; 2129 return 0;
2157} 2130}
2158 2131
2159/********************************************************************** 2132/**********************************************************************
2160 * SBMAC_SETMULTI(sc) 2133 * SBMAC_SETMULTI(sc)
2161 * 2134 *
2162 * Reprogram the multicast table into the hardware, given 2135 * Reprogram the multicast table into the hardware, given
2163 * the list of multicasts associated with the interface 2136 * the list of multicasts associated with the interface
2164 * structure. 2137 * structure.
2165 * 2138 *
2166 * Input parameters: 2139 * Input parameters:
2167 * sc - softc 2140 * sc - softc
2168 * 2141 *
2169 * Return value: 2142 * Return value:
2170 * nothing 2143 * nothing
2171 ********************************************************************* */ 2144 ********************************************************************* */
@@ -2173,75 +2146,75 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2173static void sbmac_setmulti(struct sbmac_softc *sc) 2146static void sbmac_setmulti(struct sbmac_softc *sc)
2174{ 2147{
2175 uint64_t reg; 2148 uint64_t reg;
2176 sbmac_port_t port; 2149 volatile void __iomem *port;
2177 int idx; 2150 int idx;
2178 struct dev_mc_list *mclist; 2151 struct dev_mc_list *mclist;
2179 struct net_device *dev = sc->sbm_dev; 2152 struct net_device *dev = sc->sbm_dev;
2180 2153
2181 /* 2154 /*
2182 * Clear out entire multicast table. We do this by nuking 2155 * Clear out entire multicast table. We do this by nuking
2183 * the entire hash table and all the direct matches except 2156 * the entire hash table and all the direct matches except
2184 * the first one, which is used for our station address 2157 * the first one, which is used for our station address
2185 */ 2158 */
2186 2159
2187 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) { 2160 for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
2188 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t)); 2161 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
2189 SBMAC_WRITECSR(port,0); 2162 __raw_writeq(0, port);
2190 } 2163 }
2191 2164
2192 for (idx = 0; idx < MAC_HASH_COUNT; idx++) { 2165 for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
2193 port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t)); 2166 port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
2194 SBMAC_WRITECSR(port,0); 2167 __raw_writeq(0, port);
2195 } 2168 }
2196 2169
2197 /* 2170 /*
2198 * Clear the filter to say we don't want any multicasts. 2171 * Clear the filter to say we don't want any multicasts.
2199 */ 2172 */
2200 2173
2201 reg = SBMAC_READCSR(sc->sbm_rxfilter); 2174 reg = __raw_readq(sc->sbm_rxfilter);
2202 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN); 2175 reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
2203 SBMAC_WRITECSR(sc->sbm_rxfilter,reg); 2176 __raw_writeq(reg, sc->sbm_rxfilter);
2204 2177
2205 if (dev->flags & IFF_ALLMULTI) { 2178 if (dev->flags & IFF_ALLMULTI) {
2206 /* 2179 /*
2207 * Enable ALL multicasts. Do this by inverting the 2180 * Enable ALL multicasts. Do this by inverting the
2208 * multicast enable bit. 2181 * multicast enable bit.
2209 */ 2182 */
2210 reg = SBMAC_READCSR(sc->sbm_rxfilter); 2183 reg = __raw_readq(sc->sbm_rxfilter);
2211 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN); 2184 reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
2212 SBMAC_WRITECSR(sc->sbm_rxfilter,reg); 2185 __raw_writeq(reg, sc->sbm_rxfilter);
2213 return; 2186 return;
2214 } 2187 }
2215
2216 2188
2217 /* 2189
2190 /*
2218 * Progam new multicast entries. For now, only use the 2191 * Progam new multicast entries. For now, only use the
2219 * perfect filter. In the future we'll need to use the 2192 * perfect filter. In the future we'll need to use the
2220 * hash filter if the perfect filter overflows 2193 * hash filter if the perfect filter overflows
2221 */ 2194 */
2222 2195
2223 /* XXX only using perfect filter for now, need to use hash 2196 /* XXX only using perfect filter for now, need to use hash
2224 * XXX if the table overflows */ 2197 * XXX if the table overflows */
2225 2198
2226 idx = 1; /* skip station address */ 2199 idx = 1; /* skip station address */
2227 mclist = dev->mc_list; 2200 mclist = dev->mc_list;
2228 while (mclist && (idx < MAC_ADDR_COUNT)) { 2201 while (mclist && (idx < MAC_ADDR_COUNT)) {
2229 reg = sbmac_addr2reg(mclist->dmi_addr); 2202 reg = sbmac_addr2reg(mclist->dmi_addr);
2230 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); 2203 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
2231 SBMAC_WRITECSR(port,reg); 2204 __raw_writeq(reg, port);
2232 idx++; 2205 idx++;
2233 mclist = mclist->next; 2206 mclist = mclist->next;
2234 } 2207 }
2235 2208
2236 /* 2209 /*
2237 * Enable the "accept multicast bits" if we programmed at least one 2210 * Enable the "accept multicast bits" if we programmed at least one
2238 * multicast. 2211 * multicast.
2239 */ 2212 */
2240 2213
2241 if (idx > 1) { 2214 if (idx > 1) {
2242 reg = SBMAC_READCSR(sc->sbm_rxfilter); 2215 reg = __raw_readq(sc->sbm_rxfilter);
2243 reg |= M_MAC_MCAST_EN; 2216 reg |= M_MAC_MCAST_EN;
2244 SBMAC_WRITECSR(sc->sbm_rxfilter,reg); 2217 __raw_writeq(reg, sc->sbm_rxfilter);
2245 } 2218 }
2246} 2219}
2247 2220
@@ -2250,12 +2223,12 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2250#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) 2223#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR)
2251/********************************************************************** 2224/**********************************************************************
2252 * SBMAC_PARSE_XDIGIT(str) 2225 * SBMAC_PARSE_XDIGIT(str)
2253 * 2226 *
2254 * Parse a hex digit, returning its value 2227 * Parse a hex digit, returning its value
2255 * 2228 *
2256 * Input parameters: 2229 * Input parameters:
2257 * str - character 2230 * str - character
2258 * 2231 *
2259 * Return value: 2232 * Return value:
2260 * hex value, or -1 if invalid 2233 * hex value, or -1 if invalid
2261 ********************************************************************* */ 2234 ********************************************************************* */
@@ -2263,7 +2236,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2263static int sbmac_parse_xdigit(char str) 2236static int sbmac_parse_xdigit(char str)
2264{ 2237{
2265 int digit; 2238 int digit;
2266 2239
2267 if ((str >= '0') && (str <= '9')) 2240 if ((str >= '0') && (str <= '9'))
2268 digit = str - '0'; 2241 digit = str - '0';
2269 else if ((str >= 'a') && (str <= 'f')) 2242 else if ((str >= 'a') && (str <= 'f'))
@@ -2272,20 +2245,20 @@ static int sbmac_parse_xdigit(char str)
2272 digit = str - 'A' + 10; 2245 digit = str - 'A' + 10;
2273 else 2246 else
2274 return -1; 2247 return -1;
2275 2248
2276 return digit; 2249 return digit;
2277} 2250}
2278 2251
2279/********************************************************************** 2252/**********************************************************************
2280 * SBMAC_PARSE_HWADDR(str,hwaddr) 2253 * SBMAC_PARSE_HWADDR(str,hwaddr)
2281 * 2254 *
2282 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte 2255 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2283 * Ethernet address. 2256 * Ethernet address.
2284 * 2257 *
2285 * Input parameters: 2258 * Input parameters:
2286 * str - string 2259 * str - string
2287 * hwaddr - pointer to hardware address 2260 * hwaddr - pointer to hardware address
2288 * 2261 *
2289 * Return value: 2262 * Return value:
2290 * 0 if ok, else -1 2263 * 0 if ok, else -1
2291 ********************************************************************* */ 2264 ********************************************************************* */
@@ -2294,7 +2267,7 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
2294{ 2267{
2295 int digit1,digit2; 2268 int digit1,digit2;
2296 int idx = 6; 2269 int idx = 6;
2297 2270
2298 while (*str && (idx > 0)) { 2271 while (*str && (idx > 0)) {
2299 digit1 = sbmac_parse_xdigit(*str); 2272 digit1 = sbmac_parse_xdigit(*str);
2300 if (digit1 < 0) 2273 if (digit1 < 0)
@@ -2302,7 +2275,7 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
2302 str++; 2275 str++;
2303 if (!*str) 2276 if (!*str)
2304 return -1; 2277 return -1;
2305 2278
2306 if ((*str == ':') || (*str == '-')) { 2279 if ((*str == ':') || (*str == '-')) {
2307 digit2 = digit1; 2280 digit2 = digit1;
2308 digit1 = 0; 2281 digit1 = 0;
@@ -2313,10 +2286,10 @@ static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
2313 return -1; 2286 return -1;
2314 str++; 2287 str++;
2315 } 2288 }
2316 2289
2317 *hwaddr++ = (digit1 << 4) | digit2; 2290 *hwaddr++ = (digit1 << 4) | digit2;
2318 idx--; 2291 idx--;
2319 2292
2320 if (*str == '-') 2293 if (*str == '-')
2321 str++; 2294 str++;
2322 if (*str == ':') 2295 if (*str == ':')
@@ -2337,12 +2310,12 @@ static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
2337 2310
2338/********************************************************************** 2311/**********************************************************************
2339 * SBMAC_INIT(dev) 2312 * SBMAC_INIT(dev)
2340 * 2313 *
2341 * Attach routine - init hardware and hook ourselves into linux 2314 * Attach routine - init hardware and hook ourselves into linux
2342 * 2315 *
2343 * Input parameters: 2316 * Input parameters:
2344 * dev - net_device structure 2317 * dev - net_device structure
2345 * 2318 *
2346 * Return value: 2319 * Return value:
2347 * status 2320 * status
2348 ********************************************************************* */ 2321 ********************************************************************* */
@@ -2354,53 +2327,53 @@ static int sbmac_init(struct net_device *dev, int idx)
2354 uint64_t ea_reg; 2327 uint64_t ea_reg;
2355 int i; 2328 int i;
2356 int err; 2329 int err;
2357 2330
2358 sc = netdev_priv(dev); 2331 sc = netdev_priv(dev);
2359 2332
2360 /* Determine controller base address */ 2333 /* Determine controller base address */
2361 2334
2362 sc->sbm_base = IOADDR(dev->base_addr); 2335 sc->sbm_base = IOADDR(dev->base_addr);
2363 sc->sbm_dev = dev; 2336 sc->sbm_dev = dev;
2364 sc->sbe_idx = idx; 2337 sc->sbe_idx = idx;
2365 2338
2366 eaddr = sc->sbm_hwaddr; 2339 eaddr = sc->sbm_hwaddr;
2367 2340
2368 /* 2341 /*
2369 * Read the ethernet address. The firwmare left this programmed 2342 * Read the ethernet address. The firwmare left this programmed
2370 * for us in the ethernet address register for each mac. 2343 * for us in the ethernet address register for each mac.
2371 */ 2344 */
2372 2345
2373 ea_reg = SBMAC_READCSR(sc->sbm_base + R_MAC_ETHERNET_ADDR); 2346 ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
2374 SBMAC_WRITECSR(sc->sbm_base + R_MAC_ETHERNET_ADDR, 0); 2347 __raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
2375 for (i = 0; i < 6; i++) { 2348 for (i = 0; i < 6; i++) {
2376 eaddr[i] = (uint8_t) (ea_reg & 0xFF); 2349 eaddr[i] = (uint8_t) (ea_reg & 0xFF);
2377 ea_reg >>= 8; 2350 ea_reg >>= 8;
2378 } 2351 }
2379 2352
2380 for (i = 0; i < 6; i++) { 2353 for (i = 0; i < 6; i++) {
2381 dev->dev_addr[i] = eaddr[i]; 2354 dev->dev_addr[i] = eaddr[i];
2382 } 2355 }
2383 2356
2384 2357
2385 /* 2358 /*
2386 * Init packet size 2359 * Init packet size
2387 */ 2360 */
2388 2361
2389 sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN; 2362 sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
2390 2363
2391 /* 2364 /*
2392 * Initialize context (get pointers to registers and stuff), then 2365 * Initialize context (get pointers to registers and stuff), then
2393 * allocate the memory for the descriptor tables. 2366 * allocate the memory for the descriptor tables.
2394 */ 2367 */
2395 2368
2396 sbmac_initctx(sc); 2369 sbmac_initctx(sc);
2397 2370
2398 /* 2371 /*
2399 * Set up Linux device callins 2372 * Set up Linux device callins
2400 */ 2373 */
2401 2374
2402 spin_lock_init(&(sc->sbm_lock)); 2375 spin_lock_init(&(sc->sbm_lock));
2403 2376
2404 dev->open = sbmac_open; 2377 dev->open = sbmac_open;
2405 dev->hard_start_xmit = sbmac_start_tx; 2378 dev->hard_start_xmit = sbmac_start_tx;
2406 dev->stop = sbmac_close; 2379 dev->stop = sbmac_close;
@@ -2419,7 +2392,7 @@ static int sbmac_init(struct net_device *dev, int idx)
2419 if (err) 2392 if (err)
2420 goto out_uninit; 2393 goto out_uninit;
2421 2394
2422 if (periph_rev >= 2) { 2395 if (sc->rx_hw_checksum == ENABLE) {
2423 printk(KERN_INFO "%s: enabling TCP rcv checksum\n", 2396 printk(KERN_INFO "%s: enabling TCP rcv checksum\n",
2424 sc->sbm_dev->name); 2397 sc->sbm_dev->name);
2425 } 2398 }
@@ -2430,10 +2403,10 @@ static int sbmac_init(struct net_device *dev, int idx)
2430 * was being displayed) 2403 * was being displayed)
2431 */ 2404 */
2432 printk(KERN_INFO 2405 printk(KERN_INFO
2433 "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n", 2406 "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
2434 dev->name, dev->base_addr, 2407 dev->name, dev->base_addr,
2435 eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]); 2408 eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]);
2436 2409
2437 2410
2438 return 0; 2411 return 0;
2439 2412
@@ -2447,54 +2420,86 @@ out_uninit:
2447static int sbmac_open(struct net_device *dev) 2420static int sbmac_open(struct net_device *dev)
2448{ 2421{
2449 struct sbmac_softc *sc = netdev_priv(dev); 2422 struct sbmac_softc *sc = netdev_priv(dev);
2450 2423
2451 if (debug > 1) { 2424 if (debug > 1) {
2452 printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq); 2425 printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
2453 } 2426 }
2454 2427
2455 /* 2428 /*
2456 * map/route interrupt (clear status first, in case something 2429 * map/route interrupt (clear status first, in case something
2457 * weird is pending; we haven't initialized the mac registers 2430 * weird is pending; we haven't initialized the mac registers
2458 * yet) 2431 * yet)
2459 */ 2432 */
2460 2433
2461 SBMAC_READCSR(sc->sbm_isr); 2434 __raw_readq(sc->sbm_isr);
2462 if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev)) 2435 if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev))
2463 return -EBUSY; 2436 return -EBUSY;
2464 2437
2465 /* 2438 /*
2466 * Configure default speed 2439 * Probe phy address
2440 */
2441
2442 if(sbmac_mii_probe(dev) == -1) {
2443 printk("%s: failed to probe PHY.\n", dev->name);
2444 return -EINVAL;
2445 }
2446
2447 /*
2448 * Configure default speed
2467 */ 2449 */
2468 2450
2469 sbmac_mii_poll(sc,noisy_mii); 2451 sbmac_mii_poll(sc,noisy_mii);
2470 2452
2471 /* 2453 /*
2472 * Turn on the channel 2454 * Turn on the channel
2473 */ 2455 */
2474 2456
2475 sbmac_set_channel_state(sc,sbmac_state_on); 2457 sbmac_set_channel_state(sc,sbmac_state_on);
2476 2458
2477 /* 2459 /*
2478 * XXX Station address is in dev->dev_addr 2460 * XXX Station address is in dev->dev_addr
2479 */ 2461 */
2480 2462
2481 if (dev->if_port == 0) 2463 if (dev->if_port == 0)
2482 dev->if_port = 0; 2464 dev->if_port = 0;
2483 2465
2484 netif_start_queue(dev); 2466 netif_start_queue(dev);
2485 2467
2486 sbmac_set_rx_mode(dev); 2468 sbmac_set_rx_mode(dev);
2487 2469
2488 /* Set the timer to check for link beat. */ 2470 /* Set the timer to check for link beat. */
2489 init_timer(&sc->sbm_timer); 2471 init_timer(&sc->sbm_timer);
2490 sc->sbm_timer.expires = jiffies + 2 * HZ/100; 2472 sc->sbm_timer.expires = jiffies + 2 * HZ/100;
2491 sc->sbm_timer.data = (unsigned long)dev; 2473 sc->sbm_timer.data = (unsigned long)dev;
2492 sc->sbm_timer.function = &sbmac_timer; 2474 sc->sbm_timer.function = &sbmac_timer;
2493 add_timer(&sc->sbm_timer); 2475 add_timer(&sc->sbm_timer);
2494 2476
2495 return 0; 2477 return 0;
2496} 2478}
2497 2479
2480static int sbmac_mii_probe(struct net_device *dev)
2481{
2482 int i;
2483 struct sbmac_softc *s = netdev_priv(dev);
2484 u16 bmsr, id1, id2;
2485 u32 vendor, device;
2486
2487 for (i=1; i<31; i++) {
2488 bmsr = sbmac_mii_read(s, i, MII_BMSR);
2489 if (bmsr != 0) {
2490 s->sbm_phys[0] = i;
2491 id1 = sbmac_mii_read(s, i, MII_PHYIDR1);
2492 id2 = sbmac_mii_read(s, i, MII_PHYIDR2);
2493 vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f);
2494 device = (id2 >> 4) & 0x3f;
2495
2496 printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n",
2497 dev->name, i, vendor, device);
2498 return i;
2499 }
2500 }
2501 return -1;
2502}
2498 2503
2499 2504
2500static int sbmac_mii_poll(struct sbmac_softc *s,int noisy) 2505static int sbmac_mii_poll(struct sbmac_softc *s,int noisy)
@@ -2609,20 +2614,20 @@ static void sbmac_timer(unsigned long data)
2609 int mii_status; 2614 int mii_status;
2610 2615
2611 spin_lock_irq (&sc->sbm_lock); 2616 spin_lock_irq (&sc->sbm_lock);
2612 2617
2613 /* make IFF_RUNNING follow the MII status bit "Link established" */ 2618 /* make IFF_RUNNING follow the MII status bit "Link established" */
2614 mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR); 2619 mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR);
2615 2620
2616 if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) { 2621 if ( (mii_status & BMSR_LINKSTAT) != (sc->sbm_phy_oldlinkstat) ) {
2617 sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT; 2622 sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT;
2618 if (mii_status & BMSR_LINKSTAT) { 2623 if (mii_status & BMSR_LINKSTAT) {
2619 netif_carrier_on(dev); 2624 netif_carrier_on(dev);
2620 } 2625 }
2621 else { 2626 else {
2622 netif_carrier_off(dev); 2627 netif_carrier_off(dev);
2623 } 2628 }
2624 } 2629 }
2625 2630
2626 /* 2631 /*
2627 * Poll the PHY to see what speed we should be running at 2632 * Poll the PHY to see what speed we should be running at
2628 */ 2633 */
@@ -2640,9 +2645,9 @@ static void sbmac_timer(unsigned long data)
2640 sbmac_channel_start(sc); 2645 sbmac_channel_start(sc);
2641 } 2646 }
2642 } 2647 }
2643 2648
2644 spin_unlock_irq (&sc->sbm_lock); 2649 spin_unlock_irq (&sc->sbm_lock);
2645 2650
2646 sc->sbm_timer.expires = jiffies + next_tick; 2651 sc->sbm_timer.expires = jiffies + next_tick;
2647 add_timer(&sc->sbm_timer); 2652 add_timer(&sc->sbm_timer);
2648} 2653}
@@ -2651,13 +2656,13 @@ static void sbmac_timer(unsigned long data)
2651static void sbmac_tx_timeout (struct net_device *dev) 2656static void sbmac_tx_timeout (struct net_device *dev)
2652{ 2657{
2653 struct sbmac_softc *sc = netdev_priv(dev); 2658 struct sbmac_softc *sc = netdev_priv(dev);
2654 2659
2655 spin_lock_irq (&sc->sbm_lock); 2660 spin_lock_irq (&sc->sbm_lock);
2656 2661
2657 2662
2658 dev->trans_start = jiffies; 2663 dev->trans_start = jiffies;
2659 sc->sbm_stats.tx_errors++; 2664 sc->sbm_stats.tx_errors++;
2660 2665
2661 spin_unlock_irq (&sc->sbm_lock); 2666 spin_unlock_irq (&sc->sbm_lock);
2662 2667
2663 printk (KERN_WARNING "%s: Transmit timed out\n",dev->name); 2668 printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
@@ -2670,13 +2675,13 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
2670{ 2675{
2671 struct sbmac_softc *sc = netdev_priv(dev); 2676 struct sbmac_softc *sc = netdev_priv(dev);
2672 unsigned long flags; 2677 unsigned long flags;
2673 2678
2674 spin_lock_irqsave(&sc->sbm_lock, flags); 2679 spin_lock_irqsave(&sc->sbm_lock, flags);
2675 2680
2676 /* XXX update other stats here */ 2681 /* XXX update other stats here */
2677 2682
2678 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2683 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2679 2684
2680 return &sc->sbm_stats; 2685 return &sc->sbm_stats;
2681} 2686}
2682 2687
@@ -2693,8 +2698,8 @@ static void sbmac_set_rx_mode(struct net_device *dev)
2693 /* 2698 /*
2694 * Promiscuous changed. 2699 * Promiscuous changed.
2695 */ 2700 */
2696 2701
2697 if (dev->flags & IFF_PROMISC) { 2702 if (dev->flags & IFF_PROMISC) {
2698 /* Unconditionally log net taps. */ 2703 /* Unconditionally log net taps. */
2699 msg_flag = 1; 2704 msg_flag = 1;
2700 sbmac_promiscuous_mode(sc,1); 2705 sbmac_promiscuous_mode(sc,1);
@@ -2705,18 +2710,18 @@ static void sbmac_set_rx_mode(struct net_device *dev)
2705 } 2710 }
2706 } 2711 }
2707 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2712 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2708 2713
2709 if (msg_flag) { 2714 if (msg_flag) {
2710 printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n", 2715 printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n",
2711 dev->name,(msg_flag==1)?"en":"dis"); 2716 dev->name,(msg_flag==1)?"en":"dis");
2712 } 2717 }
2713 2718
2714 /* 2719 /*
2715 * Program the multicasts. Do this every time. 2720 * Program the multicasts. Do this every time.
2716 */ 2721 */
2717 2722
2718 sbmac_setmulti(sc); 2723 sbmac_setmulti(sc);
2719 2724
2720} 2725}
2721 2726
2722static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2727static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2725,10 +2730,10 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2725 u16 *data = (u16 *)&rq->ifr_ifru; 2730 u16 *data = (u16 *)&rq->ifr_ifru;
2726 unsigned long flags; 2731 unsigned long flags;
2727 int retval; 2732 int retval;
2728 2733
2729 spin_lock_irqsave(&sc->sbm_lock, flags); 2734 spin_lock_irqsave(&sc->sbm_lock, flags);
2730 retval = 0; 2735 retval = 0;
2731 2736
2732 switch(cmd) { 2737 switch(cmd) {
2733 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */ 2738 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
2734 data[0] = sc->sbm_phys[0] & 0x1f; 2739 data[0] = sc->sbm_phys[0] & 0x1f;
@@ -2750,7 +2755,7 @@ static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2750 default: 2755 default:
2751 retval = -EOPNOTSUPP; 2756 retval = -EOPNOTSUPP;
2752 } 2757 }
2753 2758
2754 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2759 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2755 return retval; 2760 return retval;
2756} 2761}
@@ -2781,7 +2786,7 @@ static int sbmac_close(struct net_device *dev)
2781 2786
2782 sbdma_emptyring(&(sc->sbm_txdma)); 2787 sbdma_emptyring(&(sc->sbm_txdma));
2783 sbdma_emptyring(&(sc->sbm_rxdma)); 2788 sbdma_emptyring(&(sc->sbm_rxdma));
2784 2789
2785 return 0; 2790 return 0;
2786} 2791}
2787 2792
@@ -2793,13 +2798,13 @@ sbmac_setup_hwaddr(int chan,char *addr)
2793{ 2798{
2794 uint8_t eaddr[6]; 2799 uint8_t eaddr[6];
2795 uint64_t val; 2800 uint64_t val;
2796 sbmac_port_t port; 2801 unsigned long port;
2797 2802
2798 port = A_MAC_CHANNEL_BASE(chan); 2803 port = A_MAC_CHANNEL_BASE(chan);
2799 sbmac_parse_hwaddr(addr,eaddr); 2804 sbmac_parse_hwaddr(addr,eaddr);
2800 val = sbmac_addr2reg(eaddr); 2805 val = sbmac_addr2reg(eaddr);
2801 SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR),val); 2806 __raw_writeq(val, IOADDR(port+R_MAC_ETHERNET_ADDR));
2802 val = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR)); 2807 val = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
2803} 2808}
2804#endif 2809#endif
2805 2810
@@ -2810,9 +2815,9 @@ sbmac_init_module(void)
2810{ 2815{
2811 int idx; 2816 int idx;
2812 struct net_device *dev; 2817 struct net_device *dev;
2813 sbmac_port_t port; 2818 unsigned long port;
2814 int chip_max_units; 2819 int chip_max_units;
2815 2820
2816 /* 2821 /*
2817 * For bringup when not using the firmware, we can pre-fill 2822 * For bringup when not using the firmware, we can pre-fill
2818 * the MAC addresses using the environment variables 2823 * the MAC addresses using the environment variables
@@ -2858,13 +2863,13 @@ sbmac_init_module(void)
2858 2863
2859 port = A_MAC_CHANNEL_BASE(idx); 2864 port = A_MAC_CHANNEL_BASE(idx);
2860 2865
2861 /* 2866 /*
2862 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero 2867 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
2863 * value for us by the firmware if we're going to use this MAC. 2868 * value for us by the firmware if we're going to use this MAC.
2864 * If we find a zero, skip this MAC. 2869 * If we find a zero, skip this MAC.
2865 */ 2870 */
2866 2871
2867 sbmac_orig_hwaddr[idx] = SBMAC_READCSR(IOADDR(port+R_MAC_ETHERNET_ADDR)); 2872 sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
2868 if (sbmac_orig_hwaddr[idx] == 0) { 2873 if (sbmac_orig_hwaddr[idx] == 0) {
2869 printk(KERN_DEBUG "sbmac: not configuring MAC at " 2874 printk(KERN_DEBUG "sbmac: not configuring MAC at "
2870 "%lx\n", port); 2875 "%lx\n", port);
@@ -2876,7 +2881,7 @@ sbmac_init_module(void)
2876 */ 2881 */
2877 2882
2878 dev = alloc_etherdev(sizeof(struct sbmac_softc)); 2883 dev = alloc_etherdev(sizeof(struct sbmac_softc));
2879 if (!dev) 2884 if (!dev)
2880 return -ENOMEM; /* return ENOMEM */ 2885 return -ENOMEM; /* return ENOMEM */
2881 2886
2882 printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port); 2887 printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
@@ -2886,8 +2891,7 @@ sbmac_init_module(void)
2886 dev->mem_end = 0; 2891 dev->mem_end = 0;
2887 if (sbmac_init(dev, idx)) { 2892 if (sbmac_init(dev, idx)) {
2888 port = A_MAC_CHANNEL_BASE(idx); 2893 port = A_MAC_CHANNEL_BASE(idx);
2889 SBMAC_WRITECSR(IOADDR(port+R_MAC_ETHERNET_ADDR), 2894 __raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR));
2890 sbmac_orig_hwaddr[idx]);
2891 free_netdev(dev); 2895 free_netdev(dev);
2892 continue; 2896 continue;
2893 } 2897 }
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 9bc3b1c0dd6a..a4614df38a90 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -32,8 +32,6 @@
32 32
33#include "sgiseeq.h" 33#include "sgiseeq.h"
34 34
35static char *version = "sgiseeq.c: David S. Miller (dm@engr.sgi.com)\n";
36
37static char *sgiseeqstr = "SGI Seeq8003"; 35static char *sgiseeqstr = "SGI Seeq8003";
38 36
39/* 37/*
@@ -113,9 +111,9 @@ static struct net_device *root_sgiseeq_dev;
113 111
114static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs) 112static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
115{ 113{
116 hregs->rx_reset = HPC3_ERXRST_CRESET | HPC3_ERXRST_CLRIRQ; 114 hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
117 udelay(20); 115 udelay(20);
118 hregs->rx_reset = 0; 116 hregs->reset = 0;
119} 117}
120 118
121static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs, 119static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
@@ -252,7 +250,6 @@ void sgiseeq_dump_rings(void)
252 250
253#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF) 251#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
254#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2) 252#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
255#define RDMACFG_INIT (HPC3_ERXDCFG_FRXDC | HPC3_ERXDCFG_FEOP | HPC3_ERXDCFG_FIRQ)
256 253
257static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp, 254static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
258 struct sgiseeq_regs *sregs) 255 struct sgiseeq_regs *sregs)
@@ -274,8 +271,6 @@ static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
274 sregs->tstat = TSTAT_INIT_SEEQ; 271 sregs->tstat = TSTAT_INIT_SEEQ;
275 } 272 }
276 273
277 hregs->rx_dconfig |= RDMACFG_INIT;
278
279 hregs->rx_ndptr = CPHYSADDR(sp->rx_desc); 274 hregs->rx_ndptr = CPHYSADDR(sp->rx_desc);
280 hregs->tx_ndptr = CPHYSADDR(sp->tx_desc); 275 hregs->tx_ndptr = CPHYSADDR(sp->tx_desc);
281 276
@@ -446,7 +441,7 @@ static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id, struct pt_regs *regs
446 spin_lock(&sp->tx_lock); 441 spin_lock(&sp->tx_lock);
447 442
448 /* Ack the IRQ and set software state. */ 443 /* Ack the IRQ and set software state. */
449 hregs->rx_reset = HPC3_ERXRST_CLRIRQ; 444 hregs->reset = HPC3_ERST_CLRIRQ;
450 445
451 /* Always check for received packets. */ 446 /* Always check for received packets. */
452 sgiseeq_rx(dev, sp, hregs, sregs); 447 sgiseeq_rx(dev, sp, hregs, sregs);
@@ -493,11 +488,13 @@ static int sgiseeq_close(struct net_device *dev)
493{ 488{
494 struct sgiseeq_private *sp = netdev_priv(dev); 489 struct sgiseeq_private *sp = netdev_priv(dev);
495 struct sgiseeq_regs *sregs = sp->sregs; 490 struct sgiseeq_regs *sregs = sp->sregs;
491 unsigned int irq = dev->irq;
496 492
497 netif_stop_queue(dev); 493 netif_stop_queue(dev);
498 494
499 /* Shutdown the Seeq. */ 495 /* Shutdown the Seeq. */
500 reset_hpc3_and_seeq(sp->hregs, sregs); 496 reset_hpc3_and_seeq(sp->hregs, sregs);
497 free_irq(irq, dev);
501 498
502 return 0; 499 return 0;
503} 500}
@@ -644,7 +641,7 @@ static inline void setup_rx_ring(struct sgiseeq_rx_desc *buf, int nbufs)
644 641
645#define ALIGNED(x) ((((unsigned long)(x)) + 0xf) & ~(0xf)) 642#define ALIGNED(x) ((((unsigned long)(x)) + 0xf) & ~(0xf))
646 643
647static int sgiseeq_init(struct hpc3_regs* regs, int irq) 644static int sgiseeq_init(struct hpc3_regs* hpcregs, int irq)
648{ 645{
649 struct sgiseeq_init_block *sr; 646 struct sgiseeq_init_block *sr;
650 struct sgiseeq_private *sp; 647 struct sgiseeq_private *sp;
@@ -680,8 +677,8 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
680 gpriv = sp; 677 gpriv = sp;
681 gdev = dev; 678 gdev = dev;
682#endif 679#endif
683 sp->sregs = (struct sgiseeq_regs *) &hpc3c0->eth_ext[0]; 680 sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
684 sp->hregs = &hpc3c0->ethregs; 681 sp->hregs = &hpcregs->ethregs;
685 sp->name = sgiseeqstr; 682 sp->name = sgiseeqstr;
686 sp->mode = SEEQ_RCMD_RBCAST; 683 sp->mode = SEEQ_RCMD_RBCAST;
687 684
@@ -698,6 +695,11 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
698 setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS); 695 setup_rx_ring(sp->rx_desc, SEEQ_RX_BUFFERS);
699 setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS); 696 setup_tx_ring(sp->tx_desc, SEEQ_TX_BUFFERS);
700 697
698 /* Setup PIO and DMA transfer timing */
699 sp->hregs->pconfig = 0x161;
700 sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
701 HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
702
701 /* Reset the chip. */ 703 /* Reset the chip. */
702 hpc3_eth_reset(sp->hregs); 704 hpc3_eth_reset(sp->hregs);
703 705
@@ -724,7 +726,7 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
724 goto err_out_free_page; 726 goto err_out_free_page;
725 } 727 }
726 728
727 printk(KERN_INFO "%s: SGI Seeq8003 ", dev->name); 729 printk(KERN_INFO "%s: %s ", dev->name, sgiseeqstr);
728 for (i = 0; i < 6; i++) 730 for (i = 0; i < 6; i++)
729 printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':'); 731 printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
730 732
@@ -734,7 +736,7 @@ static int sgiseeq_init(struct hpc3_regs* regs, int irq)
734 return 0; 736 return 0;
735 737
736err_out_free_page: 738err_out_free_page:
737 free_page((unsigned long) sp); 739 free_page((unsigned long) sp->srings);
738err_out_free_dev: 740err_out_free_dev:
739 kfree(dev); 741 kfree(dev);
740 742
@@ -744,8 +746,6 @@ err_out:
744 746
745static int __init sgiseeq_probe(void) 747static int __init sgiseeq_probe(void)
746{ 748{
747 printk(version);
748
749 /* On board adapter on 1st HPC is always present */ 749 /* On board adapter on 1st HPC is always present */
750 return sgiseeq_init(hpc3c0, SGI_ENET_IRQ); 750 return sgiseeq_init(hpc3c0, SGI_ENET_IRQ);
751} 751}
@@ -754,15 +754,12 @@ static void __exit sgiseeq_exit(void)
754{ 754{
755 struct net_device *next, *dev; 755 struct net_device *next, *dev;
756 struct sgiseeq_private *sp; 756 struct sgiseeq_private *sp;
757 int irq;
758 757
759 for (dev = root_sgiseeq_dev; dev; dev = next) { 758 for (dev = root_sgiseeq_dev; dev; dev = next) {
760 sp = (struct sgiseeq_private *) netdev_priv(dev); 759 sp = (struct sgiseeq_private *) netdev_priv(dev);
761 next = sp->next_module; 760 next = sp->next_module;
762 irq = dev->irq;
763 unregister_netdev(dev); 761 unregister_netdev(dev);
764 free_irq(irq, dev); 762 free_page((unsigned long) sp->srings);
765 free_page((unsigned long) sp);
766 free_netdev(dev); 763 free_netdev(dev);
767 } 764 }
768} 765}
@@ -770,4 +767,6 @@ static void __exit sgiseeq_exit(void)
770module_init(sgiseeq_probe); 767module_init(sgiseeq_probe);
771module_exit(sgiseeq_exit); 768module_exit(sgiseeq_exit);
772 769
770MODULE_DESCRIPTION("SGI Seeq 8003 driver");
771MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
773MODULE_LICENSE("GPL"); 772MODULE_LICENSE("GPL");
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 92f75529eff8..478791e09bf7 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -842,7 +842,7 @@ static void sis190_set_rx_mode(struct net_device *dev)
842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 842 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
843 i++, mclist = mclist->next) { 843 i++, mclist = mclist->next) {
844 int bit_nr = 844 int bit_nr =
845 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 845 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 846 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
847 rx_mode |= AcceptMulticast; 847 rx_mode |= AcceptMulticast;
848 } 848 }
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 23b713c700b3..1d4d88680db1 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1696,15 +1696,20 @@ static int sis900_rx(struct net_device *net_dev)
1696 long ioaddr = net_dev->base_addr; 1696 long ioaddr = net_dev->base_addr;
1697 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; 1697 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
1698 u32 rx_status = sis_priv->rx_ring[entry].cmdsts; 1698 u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
1699 int rx_work_limit;
1699 1700
1700 if (netif_msg_rx_status(sis_priv)) 1701 if (netif_msg_rx_status(sis_priv))
1701 printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d " 1702 printk(KERN_DEBUG "sis900_rx, cur_rx:%4.4d, dirty_rx:%4.4d "
1702 "status:0x%8.8x\n", 1703 "status:0x%8.8x\n",
1703 sis_priv->cur_rx, sis_priv->dirty_rx, rx_status); 1704 sis_priv->cur_rx, sis_priv->dirty_rx, rx_status);
1705 rx_work_limit = sis_priv->dirty_rx + NUM_RX_DESC - sis_priv->cur_rx;
1704 1706
1705 while (rx_status & OWN) { 1707 while (rx_status & OWN) {
1706 unsigned int rx_size; 1708 unsigned int rx_size;
1707 1709
1710 if (--rx_work_limit < 0)
1711 break;
1712
1708 rx_size = (rx_status & DSIZE) - CRC_SIZE; 1713 rx_size = (rx_status & DSIZE) - CRC_SIZE;
1709 1714
1710 if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) { 1715 if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
@@ -1732,9 +1737,11 @@ static int sis900_rx(struct net_device *net_dev)
1732 we are working on NULL sk_buff :-( */ 1737 we are working on NULL sk_buff :-( */
1733 if (sis_priv->rx_skbuff[entry] == NULL) { 1738 if (sis_priv->rx_skbuff[entry] == NULL) {
1734 if (netif_msg_rx_err(sis_priv)) 1739 if (netif_msg_rx_err(sis_priv))
1735 printk(KERN_INFO "%s: NULL pointer " 1740 printk(KERN_WARNING "%s: NULL pointer "
1736 "encountered in Rx ring, skipping\n", 1741 "encountered in Rx ring\n"
1737 net_dev->name); 1742 "cur_rx:%4.4d, dirty_rx:%4.4d\n",
1743 net_dev->name, sis_priv->cur_rx,
1744 sis_priv->dirty_rx);
1738 break; 1745 break;
1739 } 1746 }
1740 1747
@@ -1770,6 +1777,7 @@ static int sis900_rx(struct net_device *net_dev)
1770 sis_priv->rx_ring[entry].cmdsts = 0; 1777 sis_priv->rx_ring[entry].cmdsts = 0;
1771 sis_priv->rx_ring[entry].bufptr = 0; 1778 sis_priv->rx_ring[entry].bufptr = 0;
1772 sis_priv->stats.rx_dropped++; 1779 sis_priv->stats.rx_dropped++;
1780 sis_priv->cur_rx++;
1773 break; 1781 break;
1774 } 1782 }
1775 skb->dev = net_dev; 1783 skb->dev = net_dev;
@@ -1787,7 +1795,7 @@ static int sis900_rx(struct net_device *net_dev)
1787 1795
1788 /* refill the Rx buffer, what if the rate of refilling is slower 1796 /* refill the Rx buffer, what if the rate of refilling is slower
1789 * than consuming ?? */ 1797 * than consuming ?? */
1790 for (;sis_priv->cur_rx - sis_priv->dirty_rx > 0; sis_priv->dirty_rx++) { 1798 for (; sis_priv->cur_rx != sis_priv->dirty_rx; sis_priv->dirty_rx++) {
1791 struct sk_buff *skb; 1799 struct sk_buff *skb;
1792 1800
1793 entry = sis_priv->dirty_rx % NUM_RX_DESC; 1801 entry = sis_priv->dirty_rx % NUM_RX_DESC;
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index f17c05cbe44b..99a776a51fb5 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -1896,7 +1896,7 @@ void smt_swap_para(struct smt_header *sm, int len, int direction)
1896 1896
1897static void smt_string_swap(char *data, const char *format, int len) 1897static void smt_string_swap(char *data, const char *format, int len)
1898{ 1898{
1899 const char *open_paren = 0 ; 1899 const char *open_paren = NULL ;
1900 int x ; 1900 int x ;
1901 1901
1902 while (len > 0 && *format) { 1902 while (len > 0 && *format) {
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index c2e6484ef138..572f121b1f4e 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -730,6 +730,7 @@ static struct ethtool_ops skge_ethtool_ops = {
730 .phys_id = skge_phys_id, 730 .phys_id = skge_phys_id,
731 .get_stats_count = skge_get_stats_count, 731 .get_stats_count = skge_get_stats_count,
732 .get_ethtool_stats = skge_get_ethtool_stats, 732 .get_ethtool_stats = skge_get_ethtool_stats,
733 .get_perm_addr = ethtool_op_get_perm_addr,
733}; 734};
734 735
735/* 736/*
@@ -3096,6 +3097,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3096 3097
3097 /* read the mac address */ 3098 /* read the mac address */
3098 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3099 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3100 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3099 3101
3100 /* device is off until link detection */ 3102 /* device is off until link detection */
3101 netif_carrier_off(dev); 3103 netif_carrier_off(dev);
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 1438fdd20826..c573bb351d4c 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1983,6 +1983,10 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
1983 if (lp->version >= (CHIP_91100 << 4)) 1983 if (lp->version >= (CHIP_91100 << 4))
1984 smc_phy_detect(dev); 1984 smc_phy_detect(dev);
1985 1985
1986 /* then shut everything down to save power */
1987 smc_shutdown(dev);
1988 smc_phy_powerdown(dev);
1989
1986 /* Set default parameters */ 1990 /* Set default parameters */
1987 lp->msg_enable = NETIF_MSG_LINK; 1991 lp->msg_enable = NETIF_MSG_LINK;
1988 lp->ctl_rfduplx = 0; 1992 lp->ctl_rfduplx = 0;
@@ -2291,11 +2295,11 @@ static int smc_drv_remove(struct device *dev)
2291 return 0; 2295 return 0;
2292} 2296}
2293 2297
2294static int smc_drv_suspend(struct device *dev, pm_message_t state, u32 level) 2298static int smc_drv_suspend(struct device *dev, pm_message_t state)
2295{ 2299{
2296 struct net_device *ndev = dev_get_drvdata(dev); 2300 struct net_device *ndev = dev_get_drvdata(dev);
2297 2301
2298 if (ndev && level == SUSPEND_DISABLE) { 2302 if (ndev) {
2299 if (netif_running(ndev)) { 2303 if (netif_running(ndev)) {
2300 netif_device_detach(ndev); 2304 netif_device_detach(ndev);
2301 smc_shutdown(ndev); 2305 smc_shutdown(ndev);
@@ -2305,12 +2309,12 @@ static int smc_drv_suspend(struct device *dev, pm_message_t state, u32 level)
2305 return 0; 2309 return 0;
2306} 2310}
2307 2311
2308static int smc_drv_resume(struct device *dev, u32 level) 2312static int smc_drv_resume(struct device *dev)
2309{ 2313{
2310 struct platform_device *pdev = to_platform_device(dev); 2314 struct platform_device *pdev = to_platform_device(dev);
2311 struct net_device *ndev = dev_get_drvdata(dev); 2315 struct net_device *ndev = dev_get_drvdata(dev);
2312 2316
2313 if (ndev && level == RESUME_ENABLE) { 2317 if (ndev) {
2314 struct smc_local *lp = netdev_priv(ndev); 2318 struct smc_local *lp = netdev_priv(ndev);
2315 smc_enable_device(pdev); 2319 smc_enable_device(pdev);
2316 if (netif_running(ndev)) { 2320 if (netif_running(ndev)) {
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index ac9ce6509eee..817f200742c3 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -230,12 +230,12 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
230#define SMC_CAN_USE_16BIT 1 230#define SMC_CAN_USE_16BIT 1
231#define SMC_CAN_USE_32BIT 0 231#define SMC_CAN_USE_32BIT 0
232 232
233#define SMC_inb(a, r) inb((a) + (r) - 0xa0000000) 233#define SMC_inb(a, r) inb((u32)a) + (r))
234#define SMC_inw(a, r) inw((a) + (r) - 0xa0000000) 234#define SMC_inw(a, r) inw(((u32)a) + (r))
235#define SMC_outb(v, a, r) outb(v, (a) + (r) - 0xa0000000) 235#define SMC_outb(v, a, r) outb(v, ((u32)a) + (r))
236#define SMC_outw(v, a, r) outw(v, (a) + (r) - 0xa0000000) 236#define SMC_outw(v, a, r) outw(v, ((u32)a) + (r))
237#define SMC_insw(a, r, p, l) insw((a) + (r) - 0xa0000000, p, l) 237#define SMC_insw(a, r, p, l) insw(((u32)a) + (r), p, l)
238#define SMC_outsw(a, r, p, l) outsw((a) + (r) - 0xa0000000, p, l) 238#define SMC_outsw(a, r, p, l) outsw(((u32)a) + (r), p, l)
239 239
240#define set_irq_type(irq, type) do {} while(0) 240#define set_irq_type(irq, type) do {} while(0)
241 241
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index efdb179ecc8c..38b2b0a3ce96 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1091,8 +1091,10 @@ static int netdev_open(struct net_device *dev)
1091 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE; 1091 rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
1092 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; 1092 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
1093 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); 1093 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
1094 if (np->queue_mem == 0) 1094 if (np->queue_mem == NULL) {
1095 free_irq(dev->irq, dev);
1095 return -ENOMEM; 1096 return -ENOMEM;
1097 }
1096 1098
1097 np->tx_done_q = np->queue_mem; 1099 np->tx_done_q = np->queue_mem;
1098 np->tx_done_q_dma = np->queue_mem_dma; 1100 np->tx_done_q_dma = np->queue_mem_dma;
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index f88f5e32b714..cfaf47c63c58 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -214,7 +214,8 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
214{ 214{
215 struct bmac_init_block *bb = bp->bmac_block; 215 struct bmac_init_block *bb = bp->bmac_block;
216 struct net_device *dev = bp->dev; 216 struct net_device *dev = bp->dev;
217 int i, gfp_flags = GFP_KERNEL; 217 int i;
218 gfp_t gfp_flags = GFP_KERNEL;
218 219
219 if (from_irq || in_interrupt()) 220 if (from_irq || in_interrupt())
220 gfp_flags = GFP_ATOMIC; 221 gfp_flags = GFP_ATOMIC;
diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h
index 5674003fc38a..b0dbc5187143 100644
--- a/drivers/net/sunbmac.h
+++ b/drivers/net/sunbmac.h
@@ -339,7 +339,7 @@ struct bigmac {
339#define ALIGNED_RX_SKB_ADDR(addr) \ 339#define ALIGNED_RX_SKB_ADDR(addr) \
340 ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr)) 340 ((((unsigned long)(addr) + (64 - 1)) & ~(64 - 1)) - (unsigned long)(addr))
341 341
342static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, int gfp_flags) 342static inline struct sk_buff *big_mac_alloc_skb(unsigned int length, gfp_t gfp_flags)
343{ 343{
344 struct sk_buff *skb; 344 struct sk_buff *skb;
345 345
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d500a5771dbc..0ab9c38b4a34 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -80,7 +80,7 @@
80 I/O access could affect performance in ARM-based system 80 I/O access could affect performance in ARM-based system
81 - Add Linux software VLAN support 81 - Add Linux software VLAN support
82 82
83 Version LK1.08 (D-Link): 83 Version LK1.08 (Philippe De Muyter phdm@macqel.be):
84 - Fix bug of custom mac address 84 - Fix bug of custom mac address
85 (StationAddr register only accept word write) 85 (StationAddr register only accept word write)
86 86
@@ -91,11 +91,14 @@
91 Version LK1.09a (ICPlus): 91 Version LK1.09a (ICPlus):
92 - Add the delay time in reading the contents of EEPROM 92 - Add the delay time in reading the contents of EEPROM
93 93
94 Version LK1.10 (Philippe De Muyter phdm@macqel.be):
95 - Make 'unblock interface after Tx underrun' work
96
94*/ 97*/
95 98
96#define DRV_NAME "sundance" 99#define DRV_NAME "sundance"
97#define DRV_VERSION "1.01+LK1.09a" 100#define DRV_VERSION "1.01+LK1.10"
98#define DRV_RELDATE "10-Jul-2003" 101#define DRV_RELDATE "28-Oct-2005"
99 102
100 103
101/* The user-configurable values. 104/* The user-configurable values.
@@ -263,8 +266,10 @@ IV. Notes
263IVb. References 266IVb. References
264 267
265The Sundance ST201 datasheet, preliminary version. 268The Sundance ST201 datasheet, preliminary version.
266http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html 269The Kendin KS8723 datasheet, preliminary version.
267http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html 270The ICplus IP100 datasheet, preliminary version.
271http://www.scyld.com/expert/100mbps.html
272http://www.scyld.com/expert/NWay.html
268 273
269IVc. Errata 274IVc. Errata
270 275
@@ -500,6 +505,25 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500static int netdev_close(struct net_device *dev); 505static int netdev_close(struct net_device *dev);
501static struct ethtool_ops ethtool_ops; 506static struct ethtool_ops ethtool_ops;
502 507
508static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
509{
510 struct netdev_private *np = netdev_priv(dev);
511 void __iomem *ioaddr = np->base + ASICCtrl;
512 int countdown;
513
514 /* ST201 documentation states ASICCtrl is a 32bit register */
515 iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr);
516 /* ST201 documentation states reset can take up to 1 ms */
517 countdown = 10 + 1;
518 while (ioread32 (ioaddr) & (ResetBusy << 16)) {
519 if (--countdown == 0) {
520 printk(KERN_WARNING "%s : reset not completed !!\n", dev->name);
521 break;
522 }
523 udelay(100);
524 }
525}
526
503static int __devinit sundance_probe1 (struct pci_dev *pdev, 527static int __devinit sundance_probe1 (struct pci_dev *pdev,
504 const struct pci_device_id *ent) 528 const struct pci_device_id *ent)
505{ 529{
@@ -518,6 +542,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
518#else 542#else
519 int bar = 1; 543 int bar = 1;
520#endif 544#endif
545 int phy, phy_idx = 0;
521 546
522 547
523/* when built into the kernel, we only print version if device is found */ 548/* when built into the kernel, we only print version if device is found */
@@ -549,6 +574,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
549 for (i = 0; i < 3; i++) 574 for (i = 0; i < 3; i++)
550 ((u16 *)dev->dev_addr)[i] = 575 ((u16 *)dev->dev_addr)[i] =
551 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); 576 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
577 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
552 578
553 dev->base_addr = (unsigned long)ioaddr; 579 dev->base_addr = (unsigned long)ioaddr;
554 dev->irq = irq; 580 dev->irq = irq;
@@ -605,33 +631,31 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
605 printk("%2.2x:", dev->dev_addr[i]); 631 printk("%2.2x:", dev->dev_addr[i]);
606 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq); 632 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
607 633
608 if (1) { 634 np->phys[0] = 1; /* Default setting */
609 int phy, phy_idx = 0; 635 np->mii_preamble_required++;
610 np->phys[0] = 1; /* Default setting */ 636 for (phy = 1; phy <= 32 && phy_idx < MII_CNT; phy++) {
611 np->mii_preamble_required++; 637 int mii_status = mdio_read(dev, phy, MII_BMSR);
612 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) { 638 int phyx = phy & 0x1f;
613 int mii_status = mdio_read(dev, phy, MII_BMSR); 639 if (mii_status != 0xffff && mii_status != 0x0000) {
614 if (mii_status != 0xffff && mii_status != 0x0000) { 640 np->phys[phy_idx++] = phyx;
615 np->phys[phy_idx++] = phy; 641 np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE);
616 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); 642 if ((mii_status & 0x0040) == 0)
617 if ((mii_status & 0x0040) == 0) 643 np->mii_preamble_required++;
618 np->mii_preamble_required++; 644 printk(KERN_INFO "%s: MII PHY found at address %d, status "
619 printk(KERN_INFO "%s: MII PHY found at address %d, status " 645 "0x%4.4x advertising %4.4x.\n",
620 "0x%4.4x advertising %4.4x.\n", 646 dev->name, phyx, mii_status, np->mii_if.advertising);
621 dev->name, phy, mii_status, np->mii_if.advertising);
622 }
623 }
624 np->mii_preamble_required--;
625
626 if (phy_idx == 0) {
627 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
628 dev->name, ioread32(ioaddr + ASICCtrl));
629 goto err_out_unregister;
630 } 647 }
648 }
649 np->mii_preamble_required--;
631 650
632 np->mii_if.phy_id = np->phys[0]; 651 if (phy_idx == 0) {
652 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
653 dev->name, ioread32(ioaddr + ASICCtrl));
654 goto err_out_unregister;
633 } 655 }
634 656
657 np->mii_if.phy_id = np->phys[0];
658
635 /* Parse override configuration */ 659 /* Parse override configuration */
636 np->an_enable = 1; 660 np->an_enable = 1;
637 if (card_idx < MAX_UNITS) { 661 if (card_idx < MAX_UNITS) {
@@ -692,7 +716,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
692 /* Reset the chip to erase previous misconfiguration. */ 716 /* Reset the chip to erase previous misconfiguration. */
693 if (netif_msg_hw(np)) 717 if (netif_msg_hw(np))
694 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl)); 718 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
695 iowrite16(0x007f, ioaddr + ASICCtrl + 2); 719 iowrite16(0x00ff, ioaddr + ASICCtrl + 2);
696 if (netif_msg_hw(np)) 720 if (netif_msg_hw(np))
697 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl)); 721 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
698 722
@@ -1190,23 +1214,33 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
1190 ("%s: Transmit status is %2.2x.\n", 1214 ("%s: Transmit status is %2.2x.\n",
1191 dev->name, tx_status); 1215 dev->name, tx_status);
1192 if (tx_status & 0x1e) { 1216 if (tx_status & 0x1e) {
1217 if (netif_msg_tx_err(np))
1218 printk("%s: Transmit error status %4.4x.\n",
1219 dev->name, tx_status);
1193 np->stats.tx_errors++; 1220 np->stats.tx_errors++;
1194 if (tx_status & 0x10) 1221 if (tx_status & 0x10)
1195 np->stats.tx_fifo_errors++; 1222 np->stats.tx_fifo_errors++;
1196 if (tx_status & 0x08) 1223 if (tx_status & 0x08)
1197 np->stats.collisions++; 1224 np->stats.collisions++;
1225 if (tx_status & 0x04)
1226 np->stats.tx_fifo_errors++;
1198 if (tx_status & 0x02) 1227 if (tx_status & 0x02)
1199 np->stats.tx_window_errors++; 1228 np->stats.tx_window_errors++;
1200 /* This reset has not been verified!. */ 1229 /*
1201 if (tx_status & 0x10) { /* Reset the Tx. */ 1230 ** This reset has been verified on
1202 np->stats.tx_fifo_errors++; 1231 ** DFE-580TX boards ! phdm@macqel.be.
1203 spin_lock(&np->lock); 1232 */
1204 reset_tx(dev); 1233 if (tx_status & 0x10) { /* TxUnderrun */
1205 spin_unlock(&np->lock); 1234 unsigned short txthreshold;
1235
1236 txthreshold = ioread16 (ioaddr + TxStartThresh);
1237 /* Restart Tx FIFO and transmitter */
1238 sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16);
1239 iowrite16 (txthreshold, ioaddr + TxStartThresh);
1240 /* No need to reset the Tx pointer here */
1206 } 1241 }
1207 if (tx_status & 0x1e) /* Restart the Tx. */ 1242 /* Restart the Tx. */
1208 iowrite16 (TxEnable, 1243 iowrite16 (TxEnable, ioaddr + MACCtrl1);
1209 ioaddr + MACCtrl1);
1210 } 1244 }
1211 /* Yup, this is a documentation bug. It cost me *hours*. */ 1245 /* Yup, this is a documentation bug. It cost me *hours*. */
1212 iowrite16 (0, ioaddr + TxStatus); 1246 iowrite16 (0, ioaddr + TxStatus);
@@ -1619,6 +1653,7 @@ static struct ethtool_ops ethtool_ops = {
1619 .get_link = get_link, 1653 .get_link = get_link,
1620 .get_msglevel = get_msglevel, 1654 .get_msglevel = get_msglevel,
1621 .set_msglevel = set_msglevel, 1655 .set_msglevel = set_msglevel,
1656 .get_perm_addr = ethtool_op_get_perm_addr,
1622}; 1657};
1623 1658
1624static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1659static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 1802c3b48799..1828a6bf8458 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -37,6 +37,7 @@
37#include <linux/tcp.h> 37#include <linux/tcp.h>
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39#include <linux/prefetch.h> 39#include <linux/prefetch.h>
40#include <linux/dma-mapping.h>
40 41
41#include <net/checksum.h> 42#include <net/checksum.h>
42 43
@@ -67,8 +68,8 @@
67 68
68#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
69#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
70#define DRV_MODULE_VERSION "3.42" 71#define DRV_MODULE_VERSION "3.43"
71#define DRV_MODULE_RELDATE "Oct 3, 2005" 72#define DRV_MODULE_RELDATE "Oct 24, 2005"
72 73
73#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -219,6 +220,10 @@ static struct pci_device_id tg3_pci_tbl[] = {
219 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 220 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, 221 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
221 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 222 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
223 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714,
224 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
225 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715,
226 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, 227 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780,
223 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, 228 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, 229 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S,
@@ -466,6 +471,15 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
466 spin_unlock_irqrestore(&tp->indirect_lock, flags); 471 spin_unlock_irqrestore(&tp->indirect_lock, flags);
467} 472}
468 473
474static void tg3_write_mem_fast(struct tg3 *tp, u32 off, u32 val)
475{
476 /* If no workaround is needed, write to mem space directly */
477 if (tp->write32 != tg3_write_indirect_reg32)
478 tw32(NIC_SRAM_WIN_BASE + off, val);
479 else
480 tg3_write_mem(tp, off, val);
481}
482
469static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val) 483static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
470{ 484{
471 unsigned long flags; 485 unsigned long flags;
@@ -570,7 +584,7 @@ static void tg3_switch_clocks(struct tg3 *tp)
570 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL); 584 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
571 u32 orig_clock_ctrl; 585 u32 orig_clock_ctrl;
572 586
573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 587 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
574 return; 588 return;
575 589
576 orig_clock_ctrl = clock_ctrl; 590 orig_clock_ctrl = clock_ctrl;
@@ -1210,7 +1224,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state)
1210 CLOCK_CTRL_ALTCLK | 1224 CLOCK_CTRL_ALTCLK |
1211 CLOCK_CTRL_PWRDOWN_PLL133); 1225 CLOCK_CTRL_PWRDOWN_PLL133);
1212 udelay(40); 1226 udelay(40);
1213 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 1227 } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1214 /* do nothing */ 1228 /* do nothing */
1215 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 1229 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1216 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) { 1230 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
@@ -3712,14 +3726,14 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3712 dev->mtu = new_mtu; 3726 dev->mtu = new_mtu;
3713 3727
3714 if (new_mtu > ETH_DATA_LEN) { 3728 if (new_mtu > ETH_DATA_LEN) {
3715 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 3729 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
3716 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 3730 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
3717 ethtool_op_set_tso(dev, 0); 3731 ethtool_op_set_tso(dev, 0);
3718 } 3732 }
3719 else 3733 else
3720 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 3734 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
3721 } else { 3735 } else {
3722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 3736 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
3723 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 3737 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
3724 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE; 3738 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
3725 } 3739 }
@@ -3850,7 +3864,7 @@ static void tg3_init_rings(struct tg3 *tp)
3850 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES); 3864 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3851 3865
3852 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ; 3866 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
3853 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) && 3867 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
3854 (tp->dev->mtu > ETH_DATA_LEN)) 3868 (tp->dev->mtu > ETH_DATA_LEN))
3855 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ; 3869 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
3856 3870
@@ -3905,10 +3919,8 @@ static void tg3_init_rings(struct tg3 *tp)
3905 */ 3919 */
3906static void tg3_free_consistent(struct tg3 *tp) 3920static void tg3_free_consistent(struct tg3 *tp)
3907{ 3921{
3908 if (tp->rx_std_buffers) { 3922 kfree(tp->rx_std_buffers);
3909 kfree(tp->rx_std_buffers); 3923 tp->rx_std_buffers = NULL;
3910 tp->rx_std_buffers = NULL;
3911 }
3912 if (tp->rx_std) { 3924 if (tp->rx_std) {
3913 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES, 3925 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3914 tp->rx_std, tp->rx_std_mapping); 3926 tp->rx_std, tp->rx_std_mapping);
@@ -4347,7 +4359,7 @@ static int tg3_chip_reset(struct tg3 *tp)
4347 val &= ~PCIX_CAPS_RELAXED_ORDERING; 4359 val &= ~PCIX_CAPS_RELAXED_ORDERING;
4348 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val); 4360 pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4349 4361
4350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 4362 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4351 u32 val; 4363 u32 val;
4352 4364
4353 /* Chip reset on 5780 will reset MSI enable bit, 4365 /* Chip reset on 5780 will reset MSI enable bit,
@@ -6003,7 +6015,7 @@ static int tg3_reset_hw(struct tg3 *tp)
6003 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK); 6015 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6004 6016
6005 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && 6017 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6006 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780)) 6018 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6007 limit = 8; 6019 limit = 8;
6008 else 6020 else
6009 limit = 16; 6021 limit = 16;
@@ -6191,14 +6203,16 @@ static void tg3_timer(unsigned long __opaque)
6191 tp->timer_counter = tp->timer_multiplier; 6203 tp->timer_counter = tp->timer_multiplier;
6192 } 6204 }
6193 6205
6194 /* Heartbeat is only sent once every 120 seconds. */ 6206 /* Heartbeat is only sent once every 2 seconds. */
6195 if (!--tp->asf_counter) { 6207 if (!--tp->asf_counter) {
6196 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) { 6208 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6197 u32 val; 6209 u32 val;
6198 6210
6199 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE); 6211 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_MBOX,
6200 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 6212 FWCMD_NICDRV_ALIVE2);
6201 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3); 6213 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6214 /* 5 seconds timeout */
6215 tg3_write_mem_fast(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6202 val = tr32(GRC_RX_CPU_EVENT); 6216 val = tr32(GRC_RX_CPU_EVENT);
6203 val |= (1 << 14); 6217 val |= (1 << 14);
6204 tw32(GRC_RX_CPU_EVENT, val); 6218 tw32(GRC_RX_CPU_EVENT, val);
@@ -6409,7 +6423,7 @@ static int tg3_open(struct net_device *dev)
6409 tp->timer_counter = tp->timer_multiplier = 6423 tp->timer_counter = tp->timer_multiplier =
6410 (HZ / tp->timer_offset); 6424 (HZ / tp->timer_offset);
6411 tp->asf_counter = tp->asf_multiplier = 6425 tp->asf_counter = tp->asf_multiplier =
6412 ((HZ / tp->timer_offset) * 120); 6426 ((HZ / tp->timer_offset) * 2);
6413 6427
6414 init_timer(&tp->timer); 6428 init_timer(&tp->timer);
6415 tp->timer.expires = jiffies + tp->timer_offset; 6429 tp->timer.expires = jiffies + tp->timer_offset;
@@ -7237,7 +7251,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7237 cmd->supported |= (SUPPORTED_1000baseT_Half | 7251 cmd->supported |= (SUPPORTED_1000baseT_Half |
7238 SUPPORTED_1000baseT_Full); 7252 SUPPORTED_1000baseT_Full);
7239 7253
7240 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) 7254 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
7241 cmd->supported |= (SUPPORTED_100baseT_Half | 7255 cmd->supported |= (SUPPORTED_100baseT_Half |
7242 SUPPORTED_100baseT_Full | 7256 SUPPORTED_100baseT_Full |
7243 SUPPORTED_10baseT_Half | 7257 SUPPORTED_10baseT_Half |
@@ -7264,7 +7278,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7264{ 7278{
7265 struct tg3 *tp = netdev_priv(dev); 7279 struct tg3 *tp = netdev_priv(dev);
7266 7280
7267 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 7281 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7268 /* These are the only valid advertisement bits allowed. */ 7282 /* These are the only valid advertisement bits allowed. */
7269 if (cmd->autoneg == AUTONEG_ENABLE && 7283 if (cmd->autoneg == AUTONEG_ENABLE &&
7270 (cmd->advertising & ~(ADVERTISED_1000baseT_Half | 7284 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
@@ -7272,7 +7286,17 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7272 ADVERTISED_Autoneg | 7286 ADVERTISED_Autoneg |
7273 ADVERTISED_FIBRE))) 7287 ADVERTISED_FIBRE)))
7274 return -EINVAL; 7288 return -EINVAL;
7275 } 7289 /* Fiber can only do SPEED_1000. */
7290 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7291 (cmd->speed != SPEED_1000))
7292 return -EINVAL;
7293 /* Copper cannot force SPEED_1000. */
7294 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7295 (cmd->speed == SPEED_1000))
7296 return -EINVAL;
7297 else if ((cmd->speed == SPEED_1000) &&
7298 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7299 return -EINVAL;
7276 7300
7277 tg3_full_lock(tp, 0); 7301 tg3_full_lock(tp, 0);
7278 7302
@@ -8380,7 +8404,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
8380 } 8404 }
8381 8405
8382 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) || 8406 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
8383 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)) { 8407 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8384 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { 8408 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
8385 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: 8409 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
8386 tp->nvram_jedecnum = JEDEC_ATMEL; 8410 tp->nvram_jedecnum = JEDEC_ATMEL;
@@ -8980,7 +9004,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
8980 9004
8981 tp->phy_id = eeprom_phy_id; 9005 tp->phy_id = eeprom_phy_id;
8982 if (eeprom_phy_serdes) { 9006 if (eeprom_phy_serdes) {
8983 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9007 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
8984 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES; 9008 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
8985 else 9009 else
8986 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES; 9010 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
@@ -9393,8 +9417,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9393 } 9417 }
9394 9418
9395 /* Find msi capability. */ 9419 /* Find msi capability. */
9396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9420 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
9421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9422 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
9397 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 9423 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
9424 }
9398 9425
9399 /* Initialize misc host control in PCI block. */ 9426 /* Initialize misc host control in PCI block. */
9400 tp->misc_host_ctrl |= (misc_ctrl_reg & 9427 tp->misc_host_ctrl |= (misc_ctrl_reg &
@@ -9412,7 +9439,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9412 9439
9413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || 9440 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
9414 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || 9441 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
9415 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) 9442 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9416 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; 9443 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
9417 9444
9418 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) || 9445 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
@@ -9607,7 +9634,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
9607 * ether_setup() via the alloc_etherdev() call 9634 * ether_setup() via the alloc_etherdev() call
9608 */ 9635 */
9609 if (tp->dev->mtu > ETH_DATA_LEN && 9636 if (tp->dev->mtu > ETH_DATA_LEN &&
9610 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780) 9637 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
9611 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 9638 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
9612 9639
9613 /* Determine WakeOnLan speed to use. */ 9640 /* Determine WakeOnLan speed to use. */
@@ -9830,7 +9857,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
9830 mac_offset = 0x7c; 9857 mac_offset = 0x7c;
9831 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 && 9858 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9832 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) || 9859 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
9833 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 9860 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9834 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) 9861 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
9835 mac_offset = 0xcc; 9862 mac_offset = 0xcc;
9836 if (tg3_nvram_lock(tp)) 9863 if (tg3_nvram_lock(tp))
@@ -10148,6 +10175,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
10148 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) { 10175 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
10149 /* 5780 always in PCIX mode */ 10176 /* 5780 always in PCIX mode */
10150 tp->dma_rwctrl |= 0x00144000; 10177 tp->dma_rwctrl |= 0x00144000;
10178 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10179 /* 5714 always in PCIX mode */
10180 tp->dma_rwctrl |= 0x00148000;
10151 } else { 10181 } else {
10152 tp->dma_rwctrl |= 0x001b000f; 10182 tp->dma_rwctrl |= 0x001b000f;
10153 } 10183 }
@@ -10347,6 +10377,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
10347 case PHY_ID_BCM5705: return "5705"; 10377 case PHY_ID_BCM5705: return "5705";
10348 case PHY_ID_BCM5750: return "5750"; 10378 case PHY_ID_BCM5750: return "5750";
10349 case PHY_ID_BCM5752: return "5752"; 10379 case PHY_ID_BCM5752: return "5752";
10380 case PHY_ID_BCM5714: return "5714";
10350 case PHY_ID_BCM5780: return "5780"; 10381 case PHY_ID_BCM5780: return "5780";
10351 case PHY_ID_BCM8002: return "8002/serdes"; 10382 case PHY_ID_BCM8002: return "8002/serdes";
10352 case 0: return "serdes"; 10383 case 0: return "serdes";
@@ -10492,17 +10523,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
10492 } 10523 }
10493 10524
10494 /* Configure DMA attributes. */ 10525 /* Configure DMA attributes. */
10495 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL); 10526 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
10496 if (!err) { 10527 if (!err) {
10497 pci_using_dac = 1; 10528 pci_using_dac = 1;
10498 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL); 10529 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
10499 if (err < 0) { 10530 if (err < 0) {
10500 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA " 10531 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
10501 "for consistent allocations\n"); 10532 "for consistent allocations\n");
10502 goto err_out_free_res; 10533 goto err_out_free_res;
10503 } 10534 }
10504 } else { 10535 } else {
10505 err = pci_set_dma_mask(pdev, 0xffffffffULL); 10536 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
10506 if (err) { 10537 if (err) {
10507 printk(KERN_ERR PFX "No usable DMA configuration, " 10538 printk(KERN_ERR PFX "No usable DMA configuration, "
10508 "aborting.\n"); 10539 "aborting.\n");
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 2e733c60bfa4..fb7e2a5f4a08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -137,6 +137,7 @@
137#define ASIC_REV_5750 0x04 137#define ASIC_REV_5750 0x04
138#define ASIC_REV_5752 0x06 138#define ASIC_REV_5752 0x06
139#define ASIC_REV_5780 0x08 139#define ASIC_REV_5780 0x08
140#define ASIC_REV_5714 0x09
140#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) 141#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
141#define CHIPREV_5700_AX 0x70 142#define CHIPREV_5700_AX 0x70
142#define CHIPREV_5700_BX 0x71 143#define CHIPREV_5700_BX 0x71
@@ -531,6 +532,8 @@
531#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000 532#define MAC_SERDES_CFG_EDGE_SELECT 0x00001000
532#define MAC_SERDES_STAT 0x00000594 533#define MAC_SERDES_STAT 0x00000594
533/* 0x598 --> 0x5b0 unused */ 534/* 0x598 --> 0x5b0 unused */
535#define SERDES_RX_CTRL 0x000005b0 /* 5780/5714 only */
536#define SERDES_RX_SIG_DETECT 0x00000400
534#define SG_DIG_CTRL 0x000005b0 537#define SG_DIG_CTRL 0x000005b0
535#define SG_DIG_USING_HW_AUTONEG 0x80000000 538#define SG_DIG_USING_HW_AUTONEG 0x80000000
536#define SG_DIG_SOFT_RESET 0x40000000 539#define SG_DIG_SOFT_RESET 0x40000000
@@ -1329,6 +1332,8 @@
1329#define GRC_LCLCTRL_CLEARINT 0x00000002 1332#define GRC_LCLCTRL_CLEARINT 0x00000002
1330#define GRC_LCLCTRL_SETINT 0x00000004 1333#define GRC_LCLCTRL_SETINT 0x00000004
1331#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008 1334#define GRC_LCLCTRL_INT_ON_ATTN 0x00000008
1335#define GRC_LCLCTRL_USE_SIG_DETECT 0x00000010 /* 5714/5780 only */
1336#define GRC_LCLCTRL_USE_EXT_SIG_DETECT 0x00000020 /* 5714/5780 only */
1332#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020 1337#define GRC_LCLCTRL_GPIO_INPUT3 0x00000020
1333#define GRC_LCLCTRL_GPIO_OE3 0x00000040 1338#define GRC_LCLCTRL_GPIO_OE3 0x00000040
1334#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080 1339#define GRC_LCLCTRL_GPIO_OUTPUT3 0x00000080
@@ -1507,6 +1512,7 @@
1507#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004 1512#define FWCMD_NICDRV_IPV6ADDR_CHG 0x00000004
1508#define FWCMD_NICDRV_FIX_DMAR 0x00000005 1513#define FWCMD_NICDRV_FIX_DMAR 0x00000005
1509#define FWCMD_NICDRV_FIX_DMAW 0x00000006 1514#define FWCMD_NICDRV_FIX_DMAW 0x00000006
1515#define FWCMD_NICDRV_ALIVE2 0x0000000d
1510#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c 1516#define NIC_SRAM_FW_CMD_LEN_MBOX 0x00000b7c
1511#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80 1517#define NIC_SRAM_FW_CMD_DATA_MBOX 0x00000b80
1512#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00 1518#define NIC_SRAM_FW_ASF_STATUS_MBOX 0x00000c00
@@ -2175,6 +2181,7 @@ struct tg3 {
2175 TG3_FLG2_MII_SERDES) 2181 TG3_FLG2_MII_SERDES)
2176#define TG3_FLG2_PARALLEL_DETECT 0x01000000 2182#define TG3_FLG2_PARALLEL_DETECT 0x01000000
2177#define TG3_FLG2_ICH_WORKAROUND 0x02000000 2183#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2184#define TG3_FLG2_5780_CLASS 0x04000000
2178 2185
2179 u32 split_mode_max_reqs; 2186 u32 split_mode_max_reqs;
2180#define SPLIT_MODE_5704_MAX_REQ 3 2187#define SPLIT_MODE_5704_MAX_REQ 3
@@ -2222,6 +2229,7 @@ struct tg3 {
2222#define PHY_ID_BCM5705 0x600081a0 2229#define PHY_ID_BCM5705 0x600081a0
2223#define PHY_ID_BCM5750 0x60008180 2230#define PHY_ID_BCM5750 0x60008180
2224#define PHY_ID_BCM5752 0x60008100 2231#define PHY_ID_BCM5752 0x60008100
2232#define PHY_ID_BCM5714 0x60008340
2225#define PHY_ID_BCM5780 0x60008350 2233#define PHY_ID_BCM5780 0x60008350
2226#define PHY_ID_BCM8002 0x60010140 2234#define PHY_ID_BCM8002 0x60010140
2227#define PHY_ID_INVALID 0xffffffff 2235#define PHY_ID_INVALID 0xffffffff
@@ -2246,8 +2254,8 @@ struct tg3 {
2246 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \ 2254 (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
2247 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ 2255 (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
2248 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ 2256 (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
2249 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5780 || \ 2257 (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
2250 (X) == PHY_ID_BCM8002) 2258 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002)
2251 2259
2252 struct tg3_hw_stats *hw_stats; 2260 struct tg3_hw_stats *hw_stats;
2253 dma_addr_t stats_mapping; 2261 dma_addr_t stats_mapping;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 32057e65808b..9f491563944e 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -318,7 +318,7 @@ static void ibmtr_cleanup_card(struct net_device *dev)
318 if (dev->base_addr) { 318 if (dev->base_addr) {
319 outb(0,dev->base_addr+ADAPTRESET); 319 outb(0,dev->base_addr+ADAPTRESET);
320 320
321 schedule_timeout(TR_RST_TIME); /* wait 50ms */ 321 schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
322 322
323 outb(0,dev->base_addr+ADAPTRESETREL); 323 outb(0,dev->base_addr+ADAPTRESETREL);
324 } 324 }
@@ -854,8 +854,7 @@ static int tok_init_card(struct net_device *dev)
854 writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN); 854 writeb(~INT_ENABLE, ti->mmio + ACA_OFFSET + ACA_RESET + ISRP_EVEN);
855 outb(0, PIOaddr + ADAPTRESET); 855 outb(0, PIOaddr + ADAPTRESET);
856 856
857 current->state=TASK_UNINTERRUPTIBLE; 857 schedule_timeout_uninterruptible(TR_RST_TIME); /* wait 50ms */
858 schedule_timeout(TR_RST_TIME); /* wait 50ms */
859 858
860 outb(0, PIOaddr + ADAPTRESETREL); 859 outb(0, PIOaddr + ADAPTRESETREL);
861#ifdef ENABLE_PAGING 860#ifdef ENABLE_PAGING
@@ -903,8 +902,8 @@ static int tok_open(struct net_device *dev)
903 DPRINTK("Adapter is up and running\n"); 902 DPRINTK("Adapter is up and running\n");
904 return 0; 903 return 0;
905 } 904 }
906 current->state=TASK_INTERRUPTIBLE; 905 i=schedule_timeout_interruptible(TR_RETRY_INTERVAL);
907 i=schedule_timeout(TR_RETRY_INTERVAL); /* wait 30 seconds */ 906 /* wait 30 seconds */
908 if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */ 907 if(i!=0) break; /*prob. a signal, like the i>24*HZ case above */
909 } 908 }
910 outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/ 909 outb(0, dev->base_addr + ADAPTRESET);/* kill pending interrupts*/
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 9e7923192a49..05477d24fd49 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
1101 1101
1102 while(olympic_priv->srb_queued) { 1102 while(olympic_priv->srb_queued) {
1103 1103
1104 t = schedule_timeout(60*HZ); 1104 t = schedule_timeout_interruptible(60*HZ);
1105 1105
1106 if(signal_pending(current)) { 1106 if(signal_pending(current)) {
1107 printk(KERN_WARNING "%s: SRB timed out.\n",dev->name); 1107 printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 2e39bf1f7462..c1925590a0e1 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1243,8 +1243,7 @@ void tms380tr_wait(unsigned long time)
1243 1243
1244 tmp = jiffies + time/(1000000/HZ); 1244 tmp = jiffies + time/(1000000/HZ);
1245 do { 1245 do {
1246 current->state = TASK_INTERRUPTIBLE; 1246 tmp = schedule_timeout_interruptible(tmp);
1247 tmp = schedule_timeout(tmp);
1248 } while(time_after(tmp, jiffies)); 1247 } while(time_after(tmp, jiffies));
1249#else 1248#else
1250 udelay(time); 1249 udelay(time);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index a22d00198e4d..d7fb3ffe06ac 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -1787,10 +1787,15 @@ static void __init de21041_get_srom_info (struct de_private *de)
1787 /* DEC now has a specification but early board makers 1787 /* DEC now has a specification but early board makers
1788 just put the address in the first EEPROM locations. */ 1788 just put the address in the first EEPROM locations. */
1789 /* This does memcmp(eedata, eedata+16, 8) */ 1789 /* This does memcmp(eedata, eedata+16, 8) */
1790
1791#ifndef CONFIG_MIPS_COBALT
1792
1790 for (i = 0; i < 8; i ++) 1793 for (i = 0; i < 8; i ++)
1791 if (ee_data[i] != ee_data[16+i]) 1794 if (ee_data[i] != ee_data[16+i])
1792 sa_offset = 20; 1795 sa_offset = 20;
1793 1796
1797#endif
1798
1794 /* store MAC address */ 1799 /* store MAC address */
1795 for (i = 0; i < 6; i ++) 1800 for (i = 0; i < 6; i ++)
1796 de->dev->dev_addr[i] = ee_data[i + sa_offset]; 1801 de->dev->dev_addr[i] = ee_data[i + sa_offset];
@@ -2071,8 +2076,7 @@ static int __init de_init_one (struct pci_dev *pdev,
2071 return 0; 2076 return 0;
2072 2077
2073err_out_iomap: 2078err_out_iomap:
2074 if (de->ee_data) 2079 kfree(de->ee_data);
2075 kfree(de->ee_data);
2076 iounmap(regs); 2080 iounmap(regs);
2077err_out_res: 2081err_out_res:
2078 pci_release_regions(pdev); 2082 pci_release_regions(pdev);
@@ -2091,8 +2095,7 @@ static void __exit de_remove_one (struct pci_dev *pdev)
2091 if (!dev) 2095 if (!dev)
2092 BUG(); 2096 BUG();
2093 unregister_netdev(dev); 2097 unregister_netdev(dev);
2094 if (de->ee_data) 2098 kfree(de->ee_data);
2095 kfree(de->ee_data);
2096 iounmap(de->regs); 2099 iounmap(de->regs);
2097 pci_release_regions(pdev); 2100 pci_release_regions(pdev);
2098 pci_disable_device(pdev); 2101 pci_disable_device(pdev);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 6266a9a7e6e3..125ed00e95a5 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1727,8 +1727,7 @@ err_out_free_ring:
1727 tp->rx_ring, tp->rx_ring_dma); 1727 tp->rx_ring, tp->rx_ring_dma);
1728 1728
1729err_out_mtable: 1729err_out_mtable:
1730 if (tp->mtable) 1730 kfree (tp->mtable);
1731 kfree (tp->mtable);
1732 pci_iounmap(pdev, ioaddr); 1731 pci_iounmap(pdev, ioaddr);
1733 1732
1734err_out_free_res: 1733err_out_free_res:
@@ -1806,8 +1805,7 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
1806 sizeof (struct tulip_rx_desc) * RX_RING_SIZE + 1805 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1807 sizeof (struct tulip_tx_desc) * TX_RING_SIZE, 1806 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1808 tp->rx_ring, tp->rx_ring_dma); 1807 tp->rx_ring, tp->rx_ring_dma);
1809 if (tp->mtable) 1808 kfree (tp->mtable);
1810 kfree (tp->mtable);
1811 pci_iounmap(pdev, tp->base_addr); 1809 pci_iounmap(pdev, tp->base_addr);
1812 free_netdev (dev); 1810 free_netdev (dev);
1813 pci_release_regions (pdev); 1811 pci_release_regions (pdev);
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index ecfa6f8805ce..4c76cb794bfb 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -419,10 +419,9 @@ typhoon_reset(void __iomem *ioaddr, int wait_type)
419 TYPHOON_STATUS_WAITING_FOR_HOST) 419 TYPHOON_STATUS_WAITING_FOR_HOST)
420 goto out; 420 goto out;
421 421
422 if(wait_type == WaitSleep) { 422 if(wait_type == WaitSleep)
423 set_current_state(TASK_UNINTERRUPTIBLE); 423 schedule_timeout_uninterruptible(1);
424 schedule_timeout(1); 424 else
425 } else
426 udelay(TYPHOON_UDELAY); 425 udelay(TYPHOON_UDELAY);
427 } 426 }
428 427
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index fc7738ffbfff..241871589283 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -490,6 +490,8 @@ struct rhine_private {
490 u8 tx_thresh, rx_thresh; 490 u8 tx_thresh, rx_thresh;
491 491
492 struct mii_if_info mii_if; 492 struct mii_if_info mii_if;
493 struct work_struct tx_timeout_task;
494 struct work_struct check_media_task;
493 void __iomem *base; 495 void __iomem *base;
494}; 496};
495 497
@@ -497,6 +499,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
497static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 499static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
498static int rhine_open(struct net_device *dev); 500static int rhine_open(struct net_device *dev);
499static void rhine_tx_timeout(struct net_device *dev); 501static void rhine_tx_timeout(struct net_device *dev);
502static void rhine_tx_timeout_task(struct net_device *dev);
503static void rhine_check_media_task(struct net_device *dev);
500static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); 504static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
501static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 505static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
502static void rhine_tx(struct net_device *dev); 506static void rhine_tx(struct net_device *dev);
@@ -814,8 +818,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
814 818
815 for (i = 0; i < 6; i++) 819 for (i = 0; i < 6; i++)
816 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i); 820 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
821 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
817 822
818 if (!is_valid_ether_addr(dev->dev_addr)) { 823 if (!is_valid_ether_addr(dev->perm_addr)) {
819 rc = -EIO; 824 rc = -EIO;
820 printk(KERN_ERR "Invalid MAC address\n"); 825 printk(KERN_ERR "Invalid MAC address\n");
821 goto err_out_unmap; 826 goto err_out_unmap;
@@ -850,6 +855,12 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
850 if (rp->quirks & rqRhineI) 855 if (rp->quirks & rqRhineI)
851 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 856 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
852 857
858 INIT_WORK(&rp->tx_timeout_task,
859 (void (*)(void *))rhine_tx_timeout_task, dev);
860
861 INIT_WORK(&rp->check_media_task,
862 (void (*)(void *))rhine_check_media_task, dev);
863
853 /* dev->name not defined before register_netdev()! */ 864 /* dev->name not defined before register_netdev()! */
854 rc = register_netdev(dev); 865 rc = register_netdev(dev);
855 if (rc) 866 if (rc)
@@ -1076,6 +1087,11 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1076 ioaddr + ChipCmd1); 1087 ioaddr + ChipCmd1);
1077} 1088}
1078 1089
1090static void rhine_check_media_task(struct net_device *dev)
1091{
1092 rhine_check_media(dev, 0);
1093}
1094
1079static void init_registers(struct net_device *dev) 1095static void init_registers(struct net_device *dev)
1080{ 1096{
1081 struct rhine_private *rp = netdev_priv(dev); 1097 struct rhine_private *rp = netdev_priv(dev);
@@ -1129,8 +1145,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1129 if (quirks & rqRhineI) { 1145 if (quirks & rqRhineI) {
1130 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR 1146 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1131 1147
1132 /* Can be called from ISR. Evil. */ 1148 /* Do not call from ISR! */
1133 mdelay(1); 1149 msleep(1);
1134 1150
1135 /* 0x80 must be set immediately before turning it off */ 1151 /* 0x80 must be set immediately before turning it off */
1136 iowrite8(0x80, ioaddr + MIICmd); 1152 iowrite8(0x80, ioaddr + MIICmd);
@@ -1220,6 +1236,16 @@ static int rhine_open(struct net_device *dev)
1220static void rhine_tx_timeout(struct net_device *dev) 1236static void rhine_tx_timeout(struct net_device *dev)
1221{ 1237{
1222 struct rhine_private *rp = netdev_priv(dev); 1238 struct rhine_private *rp = netdev_priv(dev);
1239
1240 /*
1241 * Move bulk of work outside of interrupt context
1242 */
1243 schedule_work(&rp->tx_timeout_task);
1244}
1245
1246static void rhine_tx_timeout_task(struct net_device *dev)
1247{
1248 struct rhine_private *rp = netdev_priv(dev);
1223 void __iomem *ioaddr = rp->base; 1249 void __iomem *ioaddr = rp->base;
1224 1250
1225 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " 1251 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1625,7 +1651,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
1625 spin_lock(&rp->lock); 1651 spin_lock(&rp->lock);
1626 1652
1627 if (intr_status & IntrLinkChange) 1653 if (intr_status & IntrLinkChange)
1628 rhine_check_media(dev, 0); 1654 schedule_work(&rp->check_media_task);
1629 if (intr_status & IntrStatsMax) { 1655 if (intr_status & IntrStatsMax) {
1630 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); 1656 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1631 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); 1657 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1829,6 +1855,7 @@ static struct ethtool_ops netdev_ethtool_ops = {
1829 .set_wol = rhine_set_wol, 1855 .set_wol = rhine_set_wol,
1830 .get_sg = ethtool_op_get_sg, 1856 .get_sg = ethtool_op_get_sg,
1831 .get_tx_csum = ethtool_op_get_tx_csum, 1857 .get_tx_csum = ethtool_op_get_tx_csum,
1858 .get_perm_addr = ethtool_op_get_perm_addr,
1832}; 1859};
1833 1860
1834static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1861static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -1872,6 +1899,9 @@ static int rhine_close(struct net_device *dev)
1872 spin_unlock_irq(&rp->lock); 1899 spin_unlock_irq(&rp->lock);
1873 1900
1874 free_irq(rp->pdev->irq, dev); 1901 free_irq(rp->pdev->irq, dev);
1902
1903 flush_scheduled_work();
1904
1875 free_rbufs(dev); 1905 free_rbufs(dev);
1876 free_tbufs(dev); 1906 free_tbufs(dev);
1877 free_ring(dev); 1907 free_ring(dev);
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index abc5cee6eedc..a368d08e7d19 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1212,10 +1212,8 @@ static void velocity_free_td_ring(struct velocity_info *vptr)
1212 velocity_free_td_ring_entry(vptr, j, i); 1212 velocity_free_td_ring_entry(vptr, j, i);
1213 1213
1214 } 1214 }
1215 if (vptr->td_infos[j]) { 1215 kfree(vptr->td_infos[j]);
1216 kfree(vptr->td_infos[j]); 1216 vptr->td_infos[j] = NULL;
1217 vptr->td_infos[j] = NULL;
1218 }
1219 } 1217 }
1220} 1218}
1221 1219
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7ff814fd65d0..e392ee8b37a1 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -400,7 +400,7 @@ static int __init cosa_init(void)
400 goto out_chrdev; 400 goto out_chrdev;
401 } 401 }
402 for (i=0; i<nr_cards; i++) { 402 for (i=0; i<nr_cards; i++) {
403 class_device_create(cosa_class, MKDEV(cosa_major, i), 403 class_device_create(cosa_class, NULL, MKDEV(cosa_major, i),
404 NULL, "cosa%d", i); 404 NULL, "cosa%d", i);
405 err = devfs_mk_cdev(MKDEV(cosa_major, i), 405 err = devfs_mk_cdev(MKDEV(cosa_major, i),
406 S_IFCHR|S_IRUSR|S_IWUSR, 406 S_IFCHR|S_IRUSR|S_IWUSR,
@@ -1617,8 +1617,7 @@ static int get_wait_data(struct cosa_data *cosa)
1617 return r; 1617 return r;
1618 } 1618 }
1619 /* sleep if not ready to read */ 1619 /* sleep if not ready to read */
1620 set_current_state(TASK_INTERRUPTIBLE); 1620 schedule_timeout_interruptible(1);
1621 schedule_timeout(1);
1622 } 1621 }
1623 printk(KERN_INFO "cosa: timeout in get_wait_data (status 0x%x)\n", 1622 printk(KERN_INFO "cosa: timeout in get_wait_data (status 0x%x)\n",
1624 cosa_getstatus(cosa)); 1623 cosa_getstatus(cosa));
@@ -1644,8 +1643,7 @@ static int put_wait_data(struct cosa_data *cosa, int data)
1644 } 1643 }
1645#if 0 1644#if 0
1646 /* sleep if not ready to read */ 1645 /* sleep if not ready to read */
1647 current->state = TASK_INTERRUPTIBLE; 1646 schedule_timeout_interruptible(1);
1648 schedule_timeout(1);
1649#endif 1647#endif
1650 } 1648 }
1651 printk(KERN_INFO "cosa%d: timeout in put_wait_data (status 0x%x)\n", 1649 printk(KERN_INFO "cosa%d: timeout in put_wait_data (status 0x%x)\n",
diff --git a/drivers/net/wan/cycx_drv.c b/drivers/net/wan/cycx_drv.c
index 9e56fc346ba4..e6d005726aad 100644
--- a/drivers/net/wan/cycx_drv.c
+++ b/drivers/net/wan/cycx_drv.c
@@ -109,7 +109,7 @@ static long cycx_2x_irq_options[] = { 7, 3, 5, 9, 10, 11, 12, 15 };
109 * < 0 error. 109 * < 0 error.
110 * Context: process */ 110 * Context: process */
111 111
112int __init cycx_drv_init(void) 112static int __init cycx_drv_init(void)
113{ 113{
114 printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE, 114 printk(KERN_INFO "%s v%u.%u %s\n", fullname, MOD_VERSION, MOD_RELEASE,
115 copyright); 115 copyright);
@@ -119,7 +119,7 @@ int __init cycx_drv_init(void)
119 119
120/* Module 'remove' entry point. 120/* Module 'remove' entry point.
121 * o release all remaining system resources */ 121 * o release all remaining system resources */
122void cycx_drv_cleanup(void) 122static void cycx_drv_cleanup(void)
123{ 123{
124} 124}
125 125
@@ -184,8 +184,7 @@ int cycx_down(struct cycx_hw *hw)
184} 184}
185 185
186/* Enable interrupt generation. */ 186/* Enable interrupt generation. */
187EXPORT_SYMBOL(cycx_inten); 187static void cycx_inten(struct cycx_hw *hw)
188void cycx_inten(struct cycx_hw *hw)
189{ 188{
190 writeb(0, hw->dpmbase); 189 writeb(0, hw->dpmbase);
191} 190}
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 7b48064364dc..430b1f630fb4 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -103,7 +103,7 @@ static struct cycx_device *cycx_card_array; /* adapter data space */
103 * < 0 error. 103 * < 0 error.
104 * Context: process 104 * Context: process
105 */ 105 */
106int __init cycx_init(void) 106static int __init cycx_init(void)
107{ 107{
108 int cnt, err = -ENOMEM; 108 int cnt, err = -ENOMEM;
109 109
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index 02d57c0b4243..a631d1c2fa14 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -78,6 +78,7 @@
78 78
79#define CYCLOMX_X25_DEBUG 1 79#define CYCLOMX_X25_DEBUG 1
80 80
81#include <linux/ctype.h> /* isdigit() */
81#include <linux/errno.h> /* return codes */ 82#include <linux/errno.h> /* return codes */
82#include <linux/if_arp.h> /* ARPHRD_HWX25 */ 83#include <linux/if_arp.h> /* ARPHRD_HWX25 */
83#include <linux/kernel.h> /* printk(), and other useful stuff */ 84#include <linux/kernel.h> /* printk(), and other useful stuff */
@@ -418,7 +419,7 @@ static int cycx_wan_new_if(struct wan_device *wandev, struct net_device *dev,
418 419
419 /* Set channel timeouts (default if not specified) */ 420 /* Set channel timeouts (default if not specified) */
420 chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90; 421 chan->idle_tmout = conf->idle_timeout ? conf->idle_timeout : 90;
421 } else if (is_digit(conf->addr[0])) { /* PVC */ 422 } else if (isdigit(conf->addr[0])) { /* PVC */
422 s16 lcn = dec_to_uint(conf->addr, 0); 423 s16 lcn = dec_to_uint(conf->addr, 0);
423 424
424 if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc) 425 if (lcn >= card->u.x.lo_pvc && lcn <= card->u.x.hi_pvc)
@@ -1531,7 +1532,7 @@ static unsigned dec_to_uint(u8 *str, int len)
1531 if (!len) 1532 if (!len)
1532 len = strlen(str); 1533 len = strlen(str);
1533 1534
1534 for (; len && is_digit(*str); ++str, --len) 1535 for (; len && isdigit(*str); ++str, --len)
1535 val = (val * 10) + (*str - (unsigned) '0'); 1536 val = (val * 10) + (*str - (unsigned) '0');
1536 1537
1537 return val; 1538 return val;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 520a77a798e2..2f61a47b4716 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -446,8 +446,8 @@ static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
446 return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda; 446 return readl(dpriv->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
447} 447}
448 448
449int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev, 449static int state_check(u32 state, struct dscc4_dev_priv *dpriv,
450 const char *msg) 450 struct net_device *dev, const char *msg)
451{ 451{
452 int ret = 0; 452 int ret = 0;
453 453
@@ -466,8 +466,9 @@ int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev,
466 return ret; 466 return ret;
467} 467}
468 468
469void dscc4_tx_print(struct net_device *dev, struct dscc4_dev_priv *dpriv, 469static void dscc4_tx_print(struct net_device *dev,
470 char *msg) 470 struct dscc4_dev_priv *dpriv,
471 char *msg)
471{ 472{
472 printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n", 473 printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
473 dev->name, dpriv->tx_current, dpriv->tx_dirty, msg); 474 dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
@@ -507,7 +508,8 @@ static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
507 } 508 }
508} 509}
509 510
510inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev) 511static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv,
512 struct net_device *dev)
511{ 513{
512 unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; 514 unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
513 struct RxFD *rx_fd = dpriv->rx_fd + dirty; 515 struct RxFD *rx_fd = dpriv->rx_fd + dirty;
@@ -542,8 +544,7 @@ static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
542 msg, i); 544 msg, i);
543 goto done; 545 goto done;
544 } 546 }
545 set_current_state(TASK_UNINTERRUPTIBLE); 547 schedule_timeout_uninterruptible(10);
546 schedule_timeout(10);
547 rmb(); 548 rmb();
548 } while (++i > 0); 549 } while (++i > 0);
549 printk(KERN_ERR "%s: %s timeout\n", dev->name, msg); 550 printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
@@ -588,8 +589,7 @@ static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
588 (dpriv->iqtx[cur] & Xpr)) 589 (dpriv->iqtx[cur] & Xpr))
589 break; 590 break;
590 smp_rmb(); 591 smp_rmb();
591 set_current_state(TASK_UNINTERRUPTIBLE); 592 schedule_timeout_uninterruptible(10);
592 schedule_timeout(10);
593 } while (++i > 0); 593 } while (++i > 0);
594 594
595 return (i >= 0 ) ? i : -EAGAIN; 595 return (i >= 0 ) ? i : -EAGAIN;
@@ -1035,8 +1035,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr)
1035 /* Flush posted writes */ 1035 /* Flush posted writes */
1036 readl(ioaddr + GSTAR); 1036 readl(ioaddr + GSTAR);
1037 1037
1038 set_current_state(TASK_UNINTERRUPTIBLE); 1038 schedule_timeout_uninterruptible(10);
1039 schedule_timeout(10);
1040 1039
1041 for (i = 0; i < 16; i++) 1040 for (i = 0; i < 16; i++)
1042 pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); 1041 pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
@@ -1894,7 +1893,7 @@ try:
1894 * It failed and locked solid. Thus the introduction of a dummy skb. 1893 * It failed and locked solid. Thus the introduction of a dummy skb.
1895 * Problem is acknowledged in errata sheet DS5. Joy :o/ 1894 * Problem is acknowledged in errata sheet DS5. Joy :o/
1896 */ 1895 */
1897struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) 1896static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
1898{ 1897{
1899 struct sk_buff *skb; 1898 struct sk_buff *skb;
1900 1899
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 2c83cca34b86..7981a2c7906e 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -74,11 +74,11 @@ MODULE_LICENSE("GPL");
74/* 74/*
75 * Modules parameters and associated varaibles 75 * Modules parameters and associated varaibles
76 */ 76 */
77int fst_txq_low = FST_LOW_WATER_MARK; 77static int fst_txq_low = FST_LOW_WATER_MARK;
78int fst_txq_high = FST_HIGH_WATER_MARK; 78static int fst_txq_high = FST_HIGH_WATER_MARK;
79int fst_max_reads = 7; 79static int fst_max_reads = 7;
80int fst_excluded_cards = 0; 80static int fst_excluded_cards = 0;
81int fst_excluded_list[FST_MAX_CARDS]; 81static int fst_excluded_list[FST_MAX_CARDS];
82 82
83module_param(fst_txq_low, int, 0); 83module_param(fst_txq_low, int, 0);
84module_param(fst_txq_high, int, 0); 84module_param(fst_txq_high, int, 0);
@@ -572,13 +572,13 @@ static void do_bottom_half_rx(struct fst_card_info *card);
572static void fst_process_tx_work_q(unsigned long work_q); 572static void fst_process_tx_work_q(unsigned long work_q);
573static void fst_process_int_work_q(unsigned long work_q); 573static void fst_process_int_work_q(unsigned long work_q);
574 574
575DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0); 575static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
576DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0); 576static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
577 577
578struct fst_card_info *fst_card_array[FST_MAX_CARDS]; 578static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
579spinlock_t fst_work_q_lock; 579static spinlock_t fst_work_q_lock;
580u64 fst_work_txq; 580static u64 fst_work_txq;
581u64 fst_work_intq; 581static u64 fst_work_intq;
582 582
583static void 583static void
584fst_q_work_item(u64 * queue, int card_index) 584fst_q_work_item(u64 * queue, int card_index)
@@ -980,8 +980,7 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
980 /* Wait for any previous command to complete */ 980 /* Wait for any previous command to complete */
981 while (mbval > NAK) { 981 while (mbval > NAK) {
982 spin_unlock_irqrestore(&card->card_lock, flags); 982 spin_unlock_irqrestore(&card->card_lock, flags);
983 set_current_state(TASK_UNINTERRUPTIBLE); 983 schedule_timeout_uninterruptible(1);
984 schedule_timeout(1);
985 spin_lock_irqsave(&card->card_lock, flags); 984 spin_lock_irqsave(&card->card_lock, flags);
986 985
987 if (++safety > 2000) { 986 if (++safety > 2000) {
@@ -1498,7 +1497,7 @@ do_bottom_half_rx(struct fst_card_info *card)
1498 * The interrupt service routine 1497 * The interrupt service routine
1499 * Dev_id is our fst_card_info pointer 1498 * Dev_id is our fst_card_info pointer
1500 */ 1499 */
1501irqreturn_t 1500static irqreturn_t
1502fst_intr(int irq, void *dev_id, struct pt_regs *regs) 1501fst_intr(int irq, void *dev_id, struct pt_regs *regs)
1503{ 1502{
1504 struct fst_card_info *card; 1503 struct fst_card_info *card;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index a5d6891c9d4c..e1601d35dced 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -330,7 +330,7 @@ static int pvc_close(struct net_device *dev)
330 330
331 331
332 332
333int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 333static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
334{ 334{
335 pvc_device *pvc = dev_to_pvc(dev); 335 pvc_device *pvc = dev_to_pvc(dev);
336 fr_proto_pvc_info info; 336 fr_proto_pvc_info info;
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index 9dccd9546a17..3b94352b0d03 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -8,10 +8,10 @@
8/* 8/*
9 * Prints out len, max to 80 octets using printk, 20 per line 9 * Prints out len, max to 80 octets using printk, 20 per line
10 */ 10 */
11void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
12{
13#ifdef DEBUG 11#ifdef DEBUG
14#ifdef LMC_PACKET_LOG 12#ifdef LMC_PACKET_LOG
13void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
14{
15 int iNewLine = 1; 15 int iNewLine = 1;
16 char str[80], *pstr; 16 char str[80], *pstr;
17 17
@@ -43,26 +43,24 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen)
43 } 43 }
44 sprintf(pstr, "\n"); 44 sprintf(pstr, "\n");
45 printk(str); 45 printk(str);
46}
46#endif 47#endif
47#endif 48#endif
48}
49 49
50#ifdef DEBUG 50#ifdef DEBUG
51u_int32_t lmcEventLogIndex = 0; 51u_int32_t lmcEventLogIndex = 0;
52u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; 52u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
53#endif
54 53
55void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3) 54void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3)
56{ 55{
57#ifdef DEBUG
58 lmcEventLogBuf[lmcEventLogIndex++] = EventNum; 56 lmcEventLogBuf[lmcEventLogIndex++] = EventNum;
59 lmcEventLogBuf[lmcEventLogIndex++] = arg2; 57 lmcEventLogBuf[lmcEventLogIndex++] = arg2;
60 lmcEventLogBuf[lmcEventLogIndex++] = arg3; 58 lmcEventLogBuf[lmcEventLogIndex++] = arg3;
61 lmcEventLogBuf[lmcEventLogIndex++] = jiffies; 59 lmcEventLogBuf[lmcEventLogIndex++] = jiffies;
62 60
63 lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1; 61 lmcEventLogIndex &= (LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS) - 1;
64#endif
65} 62}
63#endif /* DEBUG */
66 64
67void lmc_trace(struct net_device *dev, char *msg){ 65void lmc_trace(struct net_device *dev, char *msg){
68#ifdef LMC_TRACE 66#ifdef LMC_TRACE
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index f55ce76b00ed..af8b55fdd9d9 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -48,14 +48,6 @@
48 */ 48 */
49 49
50/* 50/*
51 * For lack of a better place, put the SSI cable stuff here.
52 */
53char *lmc_t1_cables[] = {
54 "V.10/RS423", "EIA530A", "reserved", "X.21", "V.35",
55 "EIA449/EIA530/V.36", "V.28/EIA232", "none", NULL
56};
57
58/*
59 * protocol independent method. 51 * protocol independent method.
60 */ 52 */
61static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *); 53static void lmc_set_protocol (lmc_softc_t * const, lmc_ctl_t *);
diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h
index 73401b0f0151..2024b26b99e6 100644
--- a/drivers/net/wan/pc300.h
+++ b/drivers/net/wan/pc300.h
@@ -472,24 +472,8 @@ enum pc300_loopback_cmds {
472 472
473#ifdef __KERNEL__ 473#ifdef __KERNEL__
474/* Function Prototypes */ 474/* Function Prototypes */
475int dma_buf_write(pc300_t *, int, ucchar *, int);
476int dma_buf_read(pc300_t *, int, struct sk_buff *);
477void tx_dma_start(pc300_t *, int); 475void tx_dma_start(pc300_t *, int);
478void rx_dma_start(pc300_t *, int);
479void tx_dma_stop(pc300_t *, int);
480void rx_dma_stop(pc300_t *, int);
481int cpc_queue_xmit(struct sk_buff *, struct net_device *);
482void cpc_net_rx(struct net_device *);
483void cpc_sca_status(pc300_t *, int);
484int cpc_change_mtu(struct net_device *, int);
485int cpc_ioctl(struct net_device *, struct ifreq *, int);
486int ch_config(pc300dev_t *);
487int rx_config(pc300dev_t *);
488int tx_config(pc300dev_t *);
489void cpc_opench(pc300dev_t *);
490void cpc_closech(pc300dev_t *);
491int cpc_open(struct net_device *dev); 476int cpc_open(struct net_device *dev);
492int cpc_close(struct net_device *dev);
493int cpc_set_media(hdlc_device *, int); 477int cpc_set_media(hdlc_device *, int);
494#endif /* __KERNEL__ */ 478#endif /* __KERNEL__ */
495 479
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 3e7753b10717..a3e65d1bc19b 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -291,6 +291,7 @@ static uclong detect_ram(pc300_t *);
291static void plx_init(pc300_t *); 291static void plx_init(pc300_t *);
292static void cpc_trace(struct net_device *, struct sk_buff *, char); 292static void cpc_trace(struct net_device *, struct sk_buff *, char);
293static int cpc_attach(struct net_device *, unsigned short, unsigned short); 293static int cpc_attach(struct net_device *, unsigned short, unsigned short);
294static int cpc_close(struct net_device *dev);
294 295
295#ifdef CONFIG_PC300_MLPPP 296#ifdef CONFIG_PC300_MLPPP
296void cpc_tty_init(pc300dev_t * dev); 297void cpc_tty_init(pc300dev_t * dev);
@@ -437,7 +438,7 @@ static void rx_dma_buf_check(pc300_t * card, int ch)
437 printk("\n"); 438 printk("\n");
438} 439}
439 440
440int dma_get_rx_frame_size(pc300_t * card, int ch) 441static int dma_get_rx_frame_size(pc300_t * card, int ch)
441{ 442{
442 volatile pcsca_bd_t __iomem *ptdescr; 443 volatile pcsca_bd_t __iomem *ptdescr;
443 ucshort first_bd = card->chan[ch].rx_first_bd; 444 ucshort first_bd = card->chan[ch].rx_first_bd;
@@ -462,7 +463,7 @@ int dma_get_rx_frame_size(pc300_t * card, int ch)
462 * dma_buf_write: writes a frame to the Tx DMA buffers 463 * dma_buf_write: writes a frame to the Tx DMA buffers
463 * NOTE: this function writes one frame at a time. 464 * NOTE: this function writes one frame at a time.
464 */ 465 */
465int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) 466static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
466{ 467{
467 int i, nchar; 468 int i, nchar;
468 volatile pcsca_bd_t __iomem *ptdescr; 469 volatile pcsca_bd_t __iomem *ptdescr;
@@ -503,7 +504,7 @@ int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len)
503 * dma_buf_read: reads a frame from the Rx DMA buffers 504 * dma_buf_read: reads a frame from the Rx DMA buffers
504 * NOTE: this function reads one frame at a time. 505 * NOTE: this function reads one frame at a time.
505 */ 506 */
506int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) 507static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
507{ 508{
508 int nchar; 509 int nchar;
509 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 510 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -560,7 +561,7 @@ int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb)
560 return (rcvd); 561 return (rcvd);
561} 562}
562 563
563void tx_dma_stop(pc300_t * card, int ch) 564static void tx_dma_stop(pc300_t * card, int ch)
564{ 565{
565 void __iomem *scabase = card->hw.scabase; 566 void __iomem *scabase = card->hw.scabase;
566 ucchar drr_ena_bit = 1 << (5 + 2 * ch); 567 ucchar drr_ena_bit = 1 << (5 + 2 * ch);
@@ -571,7 +572,7 @@ void tx_dma_stop(pc300_t * card, int ch)
571 cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit); 572 cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
572} 573}
573 574
574void rx_dma_stop(pc300_t * card, int ch) 575static void rx_dma_stop(pc300_t * card, int ch)
575{ 576{
576 void __iomem *scabase = card->hw.scabase; 577 void __iomem *scabase = card->hw.scabase;
577 ucchar drr_ena_bit = 1 << (4 + 2 * ch); 578 ucchar drr_ena_bit = 1 << (4 + 2 * ch);
@@ -582,7 +583,7 @@ void rx_dma_stop(pc300_t * card, int ch)
582 cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit); 583 cpc_writeb(scabase + DRR, drr_rst_bit & ~drr_ena_bit);
583} 584}
584 585
585void rx_dma_start(pc300_t * card, int ch) 586static void rx_dma_start(pc300_t * card, int ch)
586{ 587{
587 void __iomem *scabase = card->hw.scabase; 588 void __iomem *scabase = card->hw.scabase;
588 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 589 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -607,7 +608,7 @@ void rx_dma_start(pc300_t * card, int ch)
607/*************************/ 608/*************************/
608/*** FALC Routines ***/ 609/*** FALC Routines ***/
609/*************************/ 610/*************************/
610void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) 611static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
611{ 612{
612 void __iomem *falcbase = card->hw.falcbase; 613 void __iomem *falcbase = card->hw.falcbase;
613 unsigned long i = 0; 614 unsigned long i = 0;
@@ -622,7 +623,7 @@ void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd)
622 cpc_writeb(falcbase + F_REG(CMDR, ch), cmd); 623 cpc_writeb(falcbase + F_REG(CMDR, ch), cmd);
623} 624}
624 625
625void falc_intr_enable(pc300_t * card, int ch) 626static void falc_intr_enable(pc300_t * card, int ch)
626{ 627{
627 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 628 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
628 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 629 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -672,7 +673,7 @@ void falc_intr_enable(pc300_t * card, int ch)
672 } 673 }
673} 674}
674 675
675void falc_open_timeslot(pc300_t * card, int ch, int timeslot) 676static void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
676{ 677{
677 void __iomem *falcbase = card->hw.falcbase; 678 void __iomem *falcbase = card->hw.falcbase;
678 ucchar tshf = card->chan[ch].falc.offset; 679 ucchar tshf = card->chan[ch].falc.offset;
@@ -688,7 +689,7 @@ void falc_open_timeslot(pc300_t * card, int ch, int timeslot)
688 (0x80 >> (timeslot & 0x07))); 689 (0x80 >> (timeslot & 0x07)));
689} 690}
690 691
691void falc_close_timeslot(pc300_t * card, int ch, int timeslot) 692static void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
692{ 693{
693 void __iomem *falcbase = card->hw.falcbase; 694 void __iomem *falcbase = card->hw.falcbase;
694 ucchar tshf = card->chan[ch].falc.offset; 695 ucchar tshf = card->chan[ch].falc.offset;
@@ -704,7 +705,7 @@ void falc_close_timeslot(pc300_t * card, int ch, int timeslot)
704 ~(0x80 >> (timeslot & 0x07))); 705 ~(0x80 >> (timeslot & 0x07)));
705} 706}
706 707
707void falc_close_all_timeslots(pc300_t * card, int ch) 708static void falc_close_all_timeslots(pc300_t * card, int ch)
708{ 709{
709 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 710 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
710 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 711 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -726,7 +727,7 @@ void falc_close_all_timeslots(pc300_t * card, int ch)
726 } 727 }
727} 728}
728 729
729void falc_open_all_timeslots(pc300_t * card, int ch) 730static void falc_open_all_timeslots(pc300_t * card, int ch)
730{ 731{
731 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 732 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
732 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 733 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -758,7 +759,7 @@ void falc_open_all_timeslots(pc300_t * card, int ch)
758 } 759 }
759} 760}
760 761
761void falc_init_timeslot(pc300_t * card, int ch) 762static void falc_init_timeslot(pc300_t * card, int ch)
762{ 763{
763 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 764 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
764 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 765 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -776,7 +777,7 @@ void falc_init_timeslot(pc300_t * card, int ch)
776 } 777 }
777} 778}
778 779
779void falc_enable_comm(pc300_t * card, int ch) 780static void falc_enable_comm(pc300_t * card, int ch)
780{ 781{
781 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 782 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
782 falc_t *pfalc = (falc_t *) & chan->falc; 783 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -792,7 +793,7 @@ void falc_enable_comm(pc300_t * card, int ch)
792 ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch))); 793 ~((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
793} 794}
794 795
795void falc_disable_comm(pc300_t * card, int ch) 796static void falc_disable_comm(pc300_t * card, int ch)
796{ 797{
797 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 798 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
798 falc_t *pfalc = (falc_t *) & chan->falc; 799 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -806,7 +807,7 @@ void falc_disable_comm(pc300_t * card, int ch)
806 ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch))); 807 ((CPLD_REG1_FALC_DCD | CPLD_REG1_FALC_CTS) << (2 * ch)));
807} 808}
808 809
809void falc_init_t1(pc300_t * card, int ch) 810static void falc_init_t1(pc300_t * card, int ch)
810{ 811{
811 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 812 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
812 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 813 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -975,7 +976,7 @@ void falc_init_t1(pc300_t * card, int ch)
975 falc_close_all_timeslots(card, ch); 976 falc_close_all_timeslots(card, ch);
976} 977}
977 978
978void falc_init_e1(pc300_t * card, int ch) 979static void falc_init_e1(pc300_t * card, int ch)
979{ 980{
980 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 981 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
981 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 982 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1155,7 +1156,7 @@ void falc_init_e1(pc300_t * card, int ch)
1155 falc_close_all_timeslots(card, ch); 1156 falc_close_all_timeslots(card, ch);
1156} 1157}
1157 1158
1158void falc_init_hdlc(pc300_t * card, int ch) 1159static void falc_init_hdlc(pc300_t * card, int ch)
1159{ 1160{
1160 void __iomem *falcbase = card->hw.falcbase; 1161 void __iomem *falcbase = card->hw.falcbase;
1161 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1162 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
@@ -1181,7 +1182,7 @@ void falc_init_hdlc(pc300_t * card, int ch)
1181 falc_intr_enable(card, ch); 1182 falc_intr_enable(card, ch);
1182} 1183}
1183 1184
1184void te_config(pc300_t * card, int ch) 1185static void te_config(pc300_t * card, int ch)
1185{ 1186{
1186 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1187 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1187 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1188 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1241,7 +1242,7 @@ void te_config(pc300_t * card, int ch)
1241 CPC_UNLOCK(card, flags); 1242 CPC_UNLOCK(card, flags);
1242} 1243}
1243 1244
1244void falc_check_status(pc300_t * card, int ch, unsigned char frs0) 1245static void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
1245{ 1246{
1246 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1247 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1247 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1248 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1397,7 +1398,7 @@ void falc_check_status(pc300_t * card, int ch, unsigned char frs0)
1397 } 1398 }
1398} 1399}
1399 1400
1400void falc_update_stats(pc300_t * card, int ch) 1401static void falc_update_stats(pc300_t * card, int ch)
1401{ 1402{
1402 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1403 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1403 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1404 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1450,7 +1451,7 @@ void falc_update_stats(pc300_t * card, int ch)
1450 * the synchronizer and then sent to the system interface. 1451 * the synchronizer and then sent to the system interface.
1451 *---------------------------------------------------------------------------- 1452 *----------------------------------------------------------------------------
1452 */ 1453 */
1453void falc_remote_loop(pc300_t * card, int ch, int loop_on) 1454static void falc_remote_loop(pc300_t * card, int ch, int loop_on)
1454{ 1455{
1455 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1456 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1456 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1457 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1495,7 +1496,7 @@ void falc_remote_loop(pc300_t * card, int ch, int loop_on)
1495 * coding must be identical. 1496 * coding must be identical.
1496 *---------------------------------------------------------------------------- 1497 *----------------------------------------------------------------------------
1497 */ 1498 */
1498void falc_local_loop(pc300_t * card, int ch, int loop_on) 1499static void falc_local_loop(pc300_t * card, int ch, int loop_on)
1499{ 1500{
1500 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1501 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1501 falc_t *pfalc = (falc_t *) & chan->falc; 1502 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1522,7 +1523,7 @@ void falc_local_loop(pc300_t * card, int ch, int loop_on)
1522 * looped. They are originated by the FALC-LH transmitter. 1523 * looped. They are originated by the FALC-LH transmitter.
1523 *---------------------------------------------------------------------------- 1524 *----------------------------------------------------------------------------
1524 */ 1525 */
1525void falc_payload_loop(pc300_t * card, int ch, int loop_on) 1526static void falc_payload_loop(pc300_t * card, int ch, int loop_on)
1526{ 1527{
1527 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1528 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1528 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1529 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1576,7 +1577,7 @@ void falc_payload_loop(pc300_t * card, int ch, int loop_on)
1576 * Description: Turns XLU bit off in the proper register 1577 * Description: Turns XLU bit off in the proper register
1577 *---------------------------------------------------------------------------- 1578 *----------------------------------------------------------------------------
1578 */ 1579 */
1579void turn_off_xlu(pc300_t * card, int ch) 1580static void turn_off_xlu(pc300_t * card, int ch)
1580{ 1581{
1581 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1582 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1582 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1583 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1597,7 +1598,7 @@ void turn_off_xlu(pc300_t * card, int ch)
1597 * Description: Turns XLD bit off in the proper register 1598 * Description: Turns XLD bit off in the proper register
1598 *---------------------------------------------------------------------------- 1599 *----------------------------------------------------------------------------
1599 */ 1600 */
1600void turn_off_xld(pc300_t * card, int ch) 1601static void turn_off_xld(pc300_t * card, int ch)
1601{ 1602{
1602 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1603 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1603 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1604 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1619,7 +1620,7 @@ void turn_off_xld(pc300_t * card, int ch)
1619 * to generate a LOOP activation code over a T1/E1 line. 1620 * to generate a LOOP activation code over a T1/E1 line.
1620 *---------------------------------------------------------------------------- 1621 *----------------------------------------------------------------------------
1621 */ 1622 */
1622void falc_generate_loop_up_code(pc300_t * card, int ch) 1623static void falc_generate_loop_up_code(pc300_t * card, int ch)
1623{ 1624{
1624 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1625 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1625 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1626 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1652,7 +1653,7 @@ void falc_generate_loop_up_code(pc300_t * card, int ch)
1652 * to generate a LOOP deactivation code over a T1/E1 line. 1653 * to generate a LOOP deactivation code over a T1/E1 line.
1653 *---------------------------------------------------------------------------- 1654 *----------------------------------------------------------------------------
1654 */ 1655 */
1655void falc_generate_loop_down_code(pc300_t * card, int ch) 1656static void falc_generate_loop_down_code(pc300_t * card, int ch)
1656{ 1657{
1657 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1658 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1658 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1659 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1682,7 +1683,7 @@ void falc_generate_loop_down_code(pc300_t * card, int ch)
1682 * it on the reception side. 1683 * it on the reception side.
1683 *---------------------------------------------------------------------------- 1684 *----------------------------------------------------------------------------
1684 */ 1685 */
1685void falc_pattern_test(pc300_t * card, int ch, unsigned int activate) 1686static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
1686{ 1687{
1687 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1688 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1688 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 1689 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -1729,7 +1730,7 @@ void falc_pattern_test(pc300_t * card, int ch, unsigned int activate)
1729 * Description: This routine returns the bit error counter value 1730 * Description: This routine returns the bit error counter value
1730 *---------------------------------------------------------------------------- 1731 *----------------------------------------------------------------------------
1731 */ 1732 */
1732ucshort falc_pattern_test_error(pc300_t * card, int ch) 1733static ucshort falc_pattern_test_error(pc300_t * card, int ch)
1733{ 1734{
1734 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; 1735 pc300ch_t *chan = (pc300ch_t *) & card->chan[ch];
1735 falc_t *pfalc = (falc_t *) & chan->falc; 1736 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -1769,7 +1770,7 @@ cpc_trace(struct net_device *dev, struct sk_buff *skb_main, char rx_tx)
1769 netif_rx(skb); 1770 netif_rx(skb);
1770} 1771}
1771 1772
1772void cpc_tx_timeout(struct net_device *dev) 1773static void cpc_tx_timeout(struct net_device *dev)
1773{ 1774{
1774 pc300dev_t *d = (pc300dev_t *) dev->priv; 1775 pc300dev_t *d = (pc300dev_t *) dev->priv;
1775 pc300ch_t *chan = (pc300ch_t *) d->chan; 1776 pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -1797,7 +1798,7 @@ void cpc_tx_timeout(struct net_device *dev)
1797 netif_wake_queue(dev); 1798 netif_wake_queue(dev);
1798} 1799}
1799 1800
1800int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) 1801static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1801{ 1802{
1802 pc300dev_t *d = (pc300dev_t *) dev->priv; 1803 pc300dev_t *d = (pc300dev_t *) dev->priv;
1803 pc300ch_t *chan = (pc300ch_t *) d->chan; 1804 pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -1880,7 +1881,7 @@ int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1880 return 0; 1881 return 0;
1881} 1882}
1882 1883
1883void cpc_net_rx(struct net_device *dev) 1884static void cpc_net_rx(struct net_device *dev)
1884{ 1885{
1885 pc300dev_t *d = (pc300dev_t *) dev->priv; 1886 pc300dev_t *d = (pc300dev_t *) dev->priv;
1886 pc300ch_t *chan = (pc300ch_t *) d->chan; 1887 pc300ch_t *chan = (pc300ch_t *) d->chan;
@@ -2403,7 +2404,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id, struct pt_regs *regs)
2403 return IRQ_HANDLED; 2404 return IRQ_HANDLED;
2404} 2405}
2405 2406
2406void cpc_sca_status(pc300_t * card, int ch) 2407static void cpc_sca_status(pc300_t * card, int ch)
2407{ 2408{
2408 ucchar ilar; 2409 ucchar ilar;
2409 void __iomem *scabase = card->hw.scabase; 2410 void __iomem *scabase = card->hw.scabase;
@@ -2495,7 +2496,7 @@ void cpc_sca_status(pc300_t * card, int ch)
2495 } 2496 }
2496} 2497}
2497 2498
2498void cpc_falc_status(pc300_t * card, int ch) 2499static void cpc_falc_status(pc300_t * card, int ch)
2499{ 2500{
2500 pc300ch_t *chan = &card->chan[ch]; 2501 pc300ch_t *chan = &card->chan[ch];
2501 falc_t *pfalc = (falc_t *) & chan->falc; 2502 falc_t *pfalc = (falc_t *) & chan->falc;
@@ -2523,7 +2524,7 @@ void cpc_falc_status(pc300_t * card, int ch)
2523 CPC_UNLOCK(card, flags); 2524 CPC_UNLOCK(card, flags);
2524} 2525}
2525 2526
2526int cpc_change_mtu(struct net_device *dev, int new_mtu) 2527static int cpc_change_mtu(struct net_device *dev, int new_mtu)
2527{ 2528{
2528 if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU)) 2529 if ((new_mtu < 128) || (new_mtu > PC300_DEF_MTU))
2529 return -EINVAL; 2530 return -EINVAL;
@@ -2531,7 +2532,7 @@ int cpc_change_mtu(struct net_device *dev, int new_mtu)
2531 return 0; 2532 return 0;
2532} 2533}
2533 2534
2534int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2535static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2535{ 2536{
2536 hdlc_device *hdlc = dev_to_hdlc(dev); 2537 hdlc_device *hdlc = dev_to_hdlc(dev);
2537 pc300dev_t *d = (pc300dev_t *) dev->priv; 2538 pc300dev_t *d = (pc300dev_t *) dev->priv;
@@ -2856,7 +2857,7 @@ static int clock_rate_calc(uclong rate, uclong clock, int *br_io)
2856 } 2857 }
2857} 2858}
2858 2859
2859int ch_config(pc300dev_t * d) 2860static int ch_config(pc300dev_t * d)
2860{ 2861{
2861 pc300ch_t *chan = (pc300ch_t *) d->chan; 2862 pc300ch_t *chan = (pc300ch_t *) d->chan;
2862 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; 2863 pc300chconf_t *conf = (pc300chconf_t *) & chan->conf;
@@ -3004,7 +3005,7 @@ int ch_config(pc300dev_t * d)
3004 return 0; 3005 return 0;
3005} 3006}
3006 3007
3007int rx_config(pc300dev_t * d) 3008static int rx_config(pc300dev_t * d)
3008{ 3009{
3009 pc300ch_t *chan = (pc300ch_t *) d->chan; 3010 pc300ch_t *chan = (pc300ch_t *) d->chan;
3010 pc300_t *card = (pc300_t *) chan->card; 3011 pc300_t *card = (pc300_t *) chan->card;
@@ -3035,7 +3036,7 @@ int rx_config(pc300dev_t * d)
3035 return 0; 3036 return 0;
3036} 3037}
3037 3038
3038int tx_config(pc300dev_t * d) 3039static int tx_config(pc300dev_t * d)
3039{ 3040{
3040 pc300ch_t *chan = (pc300ch_t *) d->chan; 3041 pc300ch_t *chan = (pc300ch_t *) d->chan;
3041 pc300_t *card = (pc300_t *) chan->card; 3042 pc300_t *card = (pc300_t *) chan->card;
@@ -3098,7 +3099,7 @@ static int cpc_attach(struct net_device *dev, unsigned short encoding,
3098 return 0; 3099 return 0;
3099} 3100}
3100 3101
3101void cpc_opench(pc300dev_t * d) 3102static void cpc_opench(pc300dev_t * d)
3102{ 3103{
3103 pc300ch_t *chan = (pc300ch_t *) d->chan; 3104 pc300ch_t *chan = (pc300ch_t *) d->chan;
3104 pc300_t *card = (pc300_t *) chan->card; 3105 pc300_t *card = (pc300_t *) chan->card;
@@ -3116,7 +3117,7 @@ void cpc_opench(pc300dev_t * d)
3116 cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR)); 3117 cpc_readb(scabase + M_REG(CTL, ch)) & ~(CTL_RTS | CTL_DTR));
3117} 3118}
3118 3119
3119void cpc_closech(pc300dev_t * d) 3120static void cpc_closech(pc300dev_t * d)
3120{ 3121{
3121 pc300ch_t *chan = (pc300ch_t *) d->chan; 3122 pc300ch_t *chan = (pc300ch_t *) d->chan;
3122 pc300_t *card = (pc300_t *) chan->card; 3123 pc300_t *card = (pc300_t *) chan->card;
@@ -3173,7 +3174,7 @@ int cpc_open(struct net_device *dev)
3173 return 0; 3174 return 0;
3174} 3175}
3175 3176
3176int cpc_close(struct net_device *dev) 3177static int cpc_close(struct net_device *dev)
3177{ 3178{
3178 hdlc_device *hdlc = dev_to_hdlc(dev); 3179 hdlc_device *hdlc = dev_to_hdlc(dev);
3179 pc300dev_t *d = (pc300dev_t *) dev->priv; 3180 pc300dev_t *d = (pc300dev_t *) dev->priv;
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 8454bf6caaa7..52f26b9c69d2 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -112,10 +112,10 @@ typedef struct _st_cpc_tty_area {
112static struct tty_driver serial_drv; 112static struct tty_driver serial_drv;
113 113
114/* local variables */ 114/* local variables */
115st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS]; 115static st_cpc_tty_area cpc_tty_area[CPC_TTY_NPORTS];
116 116
117int cpc_tty_cnt=0; /* number of intrfaces configured with MLPPP */ 117static int cpc_tty_cnt = 0; /* number of intrfaces configured with MLPPP */
118int cpc_tty_unreg_flag = 0; 118static int cpc_tty_unreg_flag = 0;
119 119
120/* TTY functions prototype */ 120/* TTY functions prototype */
121static int cpc_tty_open(struct tty_struct *tty, struct file *flip); 121static int cpc_tty_open(struct tty_struct *tty, struct file *flip);
@@ -132,9 +132,9 @@ static void cpc_tty_trace(pc300dev_t *dev, char* buf, int len, char rxtx);
132static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char); 132static void cpc_tty_signal_off(pc300dev_t *pc300dev, unsigned char);
133static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char); 133static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char);
134 134
135int pc300_tiocmset(struct tty_struct *, struct file *, 135static int pc300_tiocmset(struct tty_struct *, struct file *,
136 unsigned int, unsigned int); 136 unsigned int, unsigned int);
137int pc300_tiocmget(struct tty_struct *, struct file *); 137static int pc300_tiocmget(struct tty_struct *, struct file *);
138 138
139/* functions called by PC300 driver */ 139/* functions called by PC300 driver */
140void cpc_tty_init(pc300dev_t *dev); 140void cpc_tty_init(pc300dev_t *dev);
@@ -538,8 +538,8 @@ static int cpc_tty_chars_in_buffer(struct tty_struct *tty)
538 return(0); 538 return(0);
539} 539}
540 540
541int pc300_tiocmset(struct tty_struct *tty, struct file *file, 541static int pc300_tiocmset(struct tty_struct *tty, struct file *file,
542 unsigned int set, unsigned int clear) 542 unsigned int set, unsigned int clear)
543{ 543{
544 st_cpc_tty_area *cpc_tty; 544 st_cpc_tty_area *cpc_tty;
545 545
@@ -565,7 +565,7 @@ int pc300_tiocmset(struct tty_struct *tty, struct file *file,
565 return 0; 565 return 0;
566} 566}
567 567
568int pc300_tiocmget(struct tty_struct *tty, struct file *file) 568static int pc300_tiocmget(struct tty_struct *tty, struct file *file)
569{ 569{
570 unsigned int result; 570 unsigned int result;
571 unsigned char status; 571 unsigned char status;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 3ac9a45b20fa..036adc4f8ba7 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -182,7 +182,7 @@ static char sdla_byte(struct net_device *dev, int addr)
182 return(byte); 182 return(byte);
183} 183}
184 184
185void sdla_stop(struct net_device *dev) 185static void sdla_stop(struct net_device *dev)
186{ 186{
187 struct frad_local *flp; 187 struct frad_local *flp;
188 188
@@ -209,7 +209,7 @@ void sdla_stop(struct net_device *dev)
209 } 209 }
210} 210}
211 211
212void sdla_start(struct net_device *dev) 212static void sdla_start(struct net_device *dev)
213{ 213{
214 struct frad_local *flp; 214 struct frad_local *flp;
215 215
@@ -247,7 +247,7 @@ void sdla_start(struct net_device *dev)
247 * 247 *
248 ***************************************************/ 248 ***************************************************/
249 249
250int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2) 250static int sdla_z80_poll(struct net_device *dev, int z80_addr, int jiffs, char resp1, char resp2)
251{ 251{
252 unsigned long start, done, now; 252 unsigned long start, done, now;
253 char resp, *temp; 253 char resp, *temp;
@@ -505,7 +505,7 @@ static int sdla_cmd(struct net_device *dev, int cmd, short dlci, short flags,
505 505
506static int sdla_reconfig(struct net_device *dev); 506static int sdla_reconfig(struct net_device *dev);
507 507
508int sdla_activate(struct net_device *slave, struct net_device *master) 508static int sdla_activate(struct net_device *slave, struct net_device *master)
509{ 509{
510 struct frad_local *flp; 510 struct frad_local *flp;
511 int i; 511 int i;
@@ -527,7 +527,7 @@ int sdla_activate(struct net_device *slave, struct net_device *master)
527 return(0); 527 return(0);
528} 528}
529 529
530int sdla_deactivate(struct net_device *slave, struct net_device *master) 530static int sdla_deactivate(struct net_device *slave, struct net_device *master)
531{ 531{
532 struct frad_local *flp; 532 struct frad_local *flp;
533 int i; 533 int i;
@@ -549,7 +549,7 @@ int sdla_deactivate(struct net_device *slave, struct net_device *master)
549 return(0); 549 return(0);
550} 550}
551 551
552int sdla_assoc(struct net_device *slave, struct net_device *master) 552static int sdla_assoc(struct net_device *slave, struct net_device *master)
553{ 553{
554 struct frad_local *flp; 554 struct frad_local *flp;
555 int i; 555 int i;
@@ -585,7 +585,7 @@ int sdla_assoc(struct net_device *slave, struct net_device *master)
585 return(0); 585 return(0);
586} 586}
587 587
588int sdla_deassoc(struct net_device *slave, struct net_device *master) 588static int sdla_deassoc(struct net_device *slave, struct net_device *master)
589{ 589{
590 struct frad_local *flp; 590 struct frad_local *flp;
591 int i; 591 int i;
@@ -613,7 +613,7 @@ int sdla_deassoc(struct net_device *slave, struct net_device *master)
613 return(0); 613 return(0);
614} 614}
615 615
616int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get) 616static int sdla_dlci_conf(struct net_device *slave, struct net_device *master, int get)
617{ 617{
618 struct frad_local *flp; 618 struct frad_local *flp;
619 struct dlci_local *dlp; 619 struct dlci_local *dlp;
@@ -1324,7 +1324,7 @@ NOTE: This is rather a useless action right now, as the
1324 return(0); 1324 return(0);
1325} 1325}
1326 1326
1327int sdla_change_mtu(struct net_device *dev, int new_mtu) 1327static int sdla_change_mtu(struct net_device *dev, int new_mtu)
1328{ 1328{
1329 struct frad_local *flp; 1329 struct frad_local *flp;
1330 1330
@@ -1337,7 +1337,7 @@ int sdla_change_mtu(struct net_device *dev, int new_mtu)
1337 return(-EOPNOTSUPP); 1337 return(-EOPNOTSUPP);
1338} 1338}
1339 1339
1340int sdla_set_config(struct net_device *dev, struct ifmap *map) 1340static int sdla_set_config(struct net_device *dev, struct ifmap *map)
1341{ 1341{
1342 struct frad_local *flp; 1342 struct frad_local *flp;
1343 int i; 1343 int i;
diff --git a/drivers/net/wan/sdla_fr.c b/drivers/net/wan/sdla_fr.c
index 0497dbdb8631..7f1ce9d4333e 100644
--- a/drivers/net/wan/sdla_fr.c
+++ b/drivers/net/wan/sdla_fr.c
@@ -822,7 +822,7 @@ static int new_if(struct wan_device* wandev, struct net_device* dev,
822 chan->card = card; 822 chan->card = card;
823 823
824 /* verify media address */ 824 /* verify media address */
825 if (is_digit(conf->addr[0])) { 825 if (isdigit(conf->addr[0])) {
826 826
827 dlci = dec_to_uint(conf->addr, 0); 827 dlci = dec_to_uint(conf->addr, 0);
828 828
@@ -3456,7 +3456,7 @@ static unsigned int dec_to_uint (unsigned char* str, int len)
3456 if (!len) 3456 if (!len)
3457 len = strlen(str); 3457 len = strlen(str);
3458 3458
3459 for (val = 0; len && is_digit(*str); ++str, --len) 3459 for (val = 0; len && isdigit(*str); ++str, --len)
3460 val = (val * 10) + (*str - (unsigned)'0'); 3460 val = (val * 10) + (*str - (unsigned)'0');
3461 3461
3462 return val; 3462 return val;
diff --git a/drivers/net/wan/sdla_x25.c b/drivers/net/wan/sdla_x25.c
index 8a95d61a2f8f..63f846d6f3a6 100644
--- a/drivers/net/wan/sdla_x25.c
+++ b/drivers/net/wan/sdla_x25.c
@@ -957,7 +957,7 @@ static int new_if(struct wan_device* wandev, struct net_device* dev,
957 chan->hold_timeout = (conf->hold_timeout) ? 957 chan->hold_timeout = (conf->hold_timeout) ?
958 conf->hold_timeout : 10; 958 conf->hold_timeout : 10;
959 959
960 }else if (is_digit(conf->addr[0])){ /* PVC */ 960 }else if (isdigit(conf->addr[0])){ /* PVC */
961 int lcn = dec_to_uint(conf->addr, 0); 961 int lcn = dec_to_uint(conf->addr, 0);
962 962
963 if ((lcn >= card->u.x.lo_pvc) && (lcn <= card->u.x.hi_pvc)){ 963 if ((lcn >= card->u.x.lo_pvc) && (lcn <= card->u.x.hi_pvc)){
@@ -3875,7 +3875,7 @@ static unsigned int dec_to_uint (unsigned char* str, int len)
3875 if (!len) 3875 if (!len)
3876 len = strlen(str); 3876 len = strlen(str);
3877 3877
3878 for (val = 0; len && is_digit(*str); ++str, --len) 3878 for (val = 0; len && isdigit(*str); ++str, --len)
3879 val = (val * 10) + (*str - (unsigned)'0'); 3879 val = (val * 10) + (*str - (unsigned)'0');
3880 3880
3881 return val; 3881 return val;
@@ -3896,9 +3896,9 @@ static unsigned int hex_to_uint (unsigned char* str, int len)
3896 for (val = 0; len; ++str, --len) 3896 for (val = 0; len; ++str, --len)
3897 { 3897 {
3898 ch = *str; 3898 ch = *str;
3899 if (is_digit(ch)) 3899 if (isdigit(ch))
3900 val = (val << 4) + (ch - (unsigned)'0'); 3900 val = (val << 4) + (ch - (unsigned)'0');
3901 else if (is_hex_digit(ch)) 3901 else if (isxdigit(ch))
3902 val = (val << 4) + ((ch & 0xDF) - (unsigned)'A' + 10); 3902 val = (val << 4) + ((ch & 0xDF) - (unsigned)'A' + 10);
3903 else break; 3903 else break;
3904 } 3904 }
diff --git a/drivers/net/wan/sdladrv.c b/drivers/net/wan/sdladrv.c
index c8bc6da57a41..7c2cf2e76300 100644
--- a/drivers/net/wan/sdladrv.c
+++ b/drivers/net/wan/sdladrv.c
@@ -642,9 +642,7 @@ int sdla_mapmem (sdlahw_t* hw, unsigned long addr)
642 * Enable interrupt generation. 642 * Enable interrupt generation.
643 */ 643 */
644 644
645EXPORT_SYMBOL(sdla_inten); 645static int sdla_inten (sdlahw_t* hw)
646
647int sdla_inten (sdlahw_t* hw)
648{ 646{
649 unsigned port = hw->port; 647 unsigned port = hw->port;
650 int tmp, i; 648 int tmp, i;
@@ -698,8 +696,7 @@ int sdla_inten (sdlahw_t* hw)
698 * Disable interrupt generation. 696 * Disable interrupt generation.
699 */ 697 */
700 698
701EXPORT_SYMBOL(sdla_intde); 699#if 0
702
703int sdla_intde (sdlahw_t* hw) 700int sdla_intde (sdlahw_t* hw)
704{ 701{
705 unsigned port = hw->port; 702 unsigned port = hw->port;
@@ -748,14 +745,13 @@ int sdla_intde (sdlahw_t* hw)
748 } 745 }
749 return 0; 746 return 0;
750} 747}
748#endif /* 0 */
751 749
752/*============================================================================ 750/*============================================================================
753 * Acknowledge SDLA hardware interrupt. 751 * Acknowledge SDLA hardware interrupt.
754 */ 752 */
755 753
756EXPORT_SYMBOL(sdla_intack); 754static int sdla_intack (sdlahw_t* hw)
757
758int sdla_intack (sdlahw_t* hw)
759{ 755{
760 unsigned port = hw->port; 756 unsigned port = hw->port;
761 int tmp; 757 int tmp;
@@ -827,8 +823,7 @@ void read_S514_int_stat (sdlahw_t* hw, u32* int_status)
827 * Generate an interrupt to adapter's CPU. 823 * Generate an interrupt to adapter's CPU.
828 */ 824 */
829 825
830EXPORT_SYMBOL(sdla_intr); 826#if 0
831
832int sdla_intr (sdlahw_t* hw) 827int sdla_intr (sdlahw_t* hw)
833{ 828{
834 unsigned port = hw->port; 829 unsigned port = hw->port;
@@ -863,6 +858,7 @@ int sdla_intr (sdlahw_t* hw)
863 } 858 }
864 return 0; 859 return 0;
865} 860}
861#endif /* 0 */
866 862
867/*============================================================================ 863/*============================================================================
868 * Execute Adapter Command. 864 * Execute Adapter Command.
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index a6d3b55013a5..2d1bba06a085 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -221,7 +221,7 @@ static void sppp_clear_timeout(struct sppp *p)
221 * here. 221 * here.
222 */ 222 */
223 223
224void sppp_input (struct net_device *dev, struct sk_buff *skb) 224static void sppp_input (struct net_device *dev, struct sk_buff *skb)
225{ 225{
226 struct ppp_header *h; 226 struct ppp_header *h;
227 struct sppp *sp = (struct sppp *)sppp_of(dev); 227 struct sppp *sp = (struct sppp *)sppp_of(dev);
@@ -355,8 +355,6 @@ done:
355 return; 355 return;
356} 356}
357 357
358EXPORT_SYMBOL(sppp_input);
359
360/* 358/*
361 * Handle transmit packets. 359 * Handle transmit packets.
362 */ 360 */
@@ -990,7 +988,7 @@ EXPORT_SYMBOL(sppp_reopen);
990 * the mtu is out of range. 988 * the mtu is out of range.
991 */ 989 */
992 990
993int sppp_change_mtu(struct net_device *dev, int new_mtu) 991static int sppp_change_mtu(struct net_device *dev, int new_mtu)
994{ 992{
995 if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP)) 993 if(new_mtu<128||new_mtu>PPP_MTU||(dev->flags&IFF_UP))
996 return -EINVAL; 994 return -EINVAL;
@@ -998,8 +996,6 @@ int sppp_change_mtu(struct net_device *dev, int new_mtu)
998 return 0; 996 return 0;
999} 997}
1000 998
1001EXPORT_SYMBOL(sppp_change_mtu);
1002
1003/** 999/**
1004 * sppp_do_ioctl - Ioctl handler for ppp/hdlc 1000 * sppp_do_ioctl - Ioctl handler for ppp/hdlc
1005 * @dev: Device subject to ioctl 1001 * @dev: Device subject to ioctl
@@ -1456,7 +1452,7 @@ static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_t
1456 return 0; 1452 return 0;
1457} 1453}
1458 1454
1459struct packet_type sppp_packet_type = { 1455static struct packet_type sppp_packet_type = {
1460 .type = __constant_htons(ETH_P_WAN_PPP), 1456 .type = __constant_htons(ETH_P_WAN_PPP),
1461 .func = sppp_rcv, 1457 .func = sppp_rcv,
1462}; 1458};
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 06998c2240d9..750c0167539c 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -35,6 +35,7 @@
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/bitops.h> 37#include <linux/bitops.h>
38#include <linux/scatterlist.h>
38#include <asm/io.h> 39#include <asm/io.h>
39#include <asm/system.h> 40#include <asm/system.h>
40 41
@@ -1046,7 +1047,6 @@ static WifiCtlHdr wifictlhdr8023 = {
1046 } 1047 }
1047}; 1048};
1048 1049
1049#ifdef WIRELESS_EXT
1050// Frequency list (map channels to frequencies) 1050// Frequency list (map channels to frequencies)
1051static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 1051static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
1052 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; 1052 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
@@ -1067,7 +1067,6 @@ typedef struct wep_key_t {
1067 1067
1068/* List of Wireless Handlers (new API) */ 1068/* List of Wireless Handlers (new API) */
1069static const struct iw_handler_def airo_handler_def; 1069static const struct iw_handler_def airo_handler_def;
1070#endif /* WIRELESS_EXT */
1071 1070
1072static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)"; 1071static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
1073 1072
@@ -1110,10 +1109,8 @@ static irqreturn_t airo_interrupt( int irq, void* dev_id, struct pt_regs
1110static int airo_thread(void *data); 1109static int airo_thread(void *data);
1111static void timer_func( struct net_device *dev ); 1110static void timer_func( struct net_device *dev );
1112static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 1111static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
1113#ifdef WIRELESS_EXT
1114static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev); 1112static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
1115static void airo_read_wireless_stats (struct airo_info *local); 1113static void airo_read_wireless_stats (struct airo_info *local);
1116#endif /* WIRELESS_EXT */
1117#ifdef CISCO_EXT 1114#ifdef CISCO_EXT
1118static int readrids(struct net_device *dev, aironet_ioctl *comp); 1115static int readrids(struct net_device *dev, aironet_ioctl *comp);
1119static int writerids(struct net_device *dev, aironet_ioctl *comp); 1116static int writerids(struct net_device *dev, aironet_ioctl *comp);
@@ -1187,12 +1184,10 @@ struct airo_info {
1187 int fid; 1184 int fid;
1188 } xmit, xmit11; 1185 } xmit, xmit11;
1189 struct net_device *wifidev; 1186 struct net_device *wifidev;
1190#ifdef WIRELESS_EXT
1191 struct iw_statistics wstats; // wireless stats 1187 struct iw_statistics wstats; // wireless stats
1192 unsigned long scan_timestamp; /* Time started to scan */ 1188 unsigned long scan_timestamp; /* Time started to scan */
1193 struct iw_spy_data spy_data; 1189 struct iw_spy_data spy_data;
1194 struct iw_public_data wireless_data; 1190 struct iw_public_data wireless_data;
1195#endif /* WIRELESS_EXT */
1196#ifdef MICSUPPORT 1191#ifdef MICSUPPORT
1197 /* MIC stuff */ 1192 /* MIC stuff */
1198 struct crypto_tfm *tfm; 1193 struct crypto_tfm *tfm;
@@ -1596,11 +1591,9 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct
1596 aes_counter[12] = (u8)(counter >> 24); 1591 aes_counter[12] = (u8)(counter >> 24);
1597 counter++; 1592 counter++;
1598 memcpy (plain, aes_counter, 16); 1593 memcpy (plain, aes_counter, 16);
1599 sg[0].page = virt_to_page(plain); 1594 sg_set_buf(sg, plain, 16);
1600 sg[0].offset = ((long) plain & ~PAGE_MASK);
1601 sg[0].length = 16;
1602 crypto_cipher_encrypt(tfm, sg, sg, 16); 1595 crypto_cipher_encrypt(tfm, sg, sg, 16);
1603 cipher = kmap(sg[0].page) + sg[0].offset; 1596 cipher = kmap(sg->page) + sg->offset;
1604 for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) { 1597 for (j=0; (j<16) && (i< (sizeof(context->coeff)/sizeof(context->coeff[0]))); ) {
1605 context->coeff[i++] = ntohl(*(u32 *)&cipher[j]); 1598 context->coeff[i++] = ntohl(*(u32 *)&cipher[j]);
1606 j += 4; 1599 j += 4;
@@ -2387,14 +2380,10 @@ void stop_airo_card( struct net_device *dev, int freeres )
2387 dev_kfree_skb(skb); 2380 dev_kfree_skb(skb);
2388 } 2381 }
2389 2382
2390 if (ai->flash) 2383 kfree(ai->flash);
2391 kfree(ai->flash); 2384 kfree(ai->rssi);
2392 if (ai->rssi) 2385 kfree(ai->APList);
2393 kfree(ai->rssi); 2386 kfree(ai->SSID);
2394 if (ai->APList)
2395 kfree(ai->APList);
2396 if (ai->SSID)
2397 kfree(ai->SSID);
2398 if (freeres) { 2387 if (freeres) {
2399 /* PCMCIA frees this stuff, so only for PCI and ISA */ 2388 /* PCMCIA frees this stuff, so only for PCI and ISA */
2400 release_region( dev->base_addr, 64 ); 2389 release_region( dev->base_addr, 64 );
@@ -2527,7 +2516,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
2527 unsigned long mem_start, mem_len, aux_start, aux_len; 2516 unsigned long mem_start, mem_len, aux_start, aux_len;
2528 int rc = -1; 2517 int rc = -1;
2529 int i; 2518 int i;
2530 unsigned char *busaddroff,*vpackoff; 2519 dma_addr_t busaddroff;
2520 unsigned char *vpackoff;
2531 unsigned char __iomem *pciaddroff; 2521 unsigned char __iomem *pciaddroff;
2532 2522
2533 mem_start = pci_resource_start(pci, 1); 2523 mem_start = pci_resource_start(pci, 1);
@@ -2570,7 +2560,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
2570 /* 2560 /*
2571 * Setup descriptor RX, TX, CONFIG 2561 * Setup descriptor RX, TX, CONFIG
2572 */ 2562 */
2573 busaddroff = (unsigned char *)ai->shared_dma; 2563 busaddroff = ai->shared_dma;
2574 pciaddroff = ai->pciaux + AUX_OFFSET; 2564 pciaddroff = ai->pciaux + AUX_OFFSET;
2575 vpackoff = ai->shared; 2565 vpackoff = ai->shared;
2576 2566
@@ -2579,7 +2569,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
2579 ai->rxfids[i].pending = 0; 2569 ai->rxfids[i].pending = 0;
2580 ai->rxfids[i].card_ram_off = pciaddroff; 2570 ai->rxfids[i].card_ram_off = pciaddroff;
2581 ai->rxfids[i].virtual_host_addr = vpackoff; 2571 ai->rxfids[i].virtual_host_addr = vpackoff;
2582 ai->rxfids[i].rx_desc.host_addr = (dma_addr_t) busaddroff; 2572 ai->rxfids[i].rx_desc.host_addr = busaddroff;
2583 ai->rxfids[i].rx_desc.valid = 1; 2573 ai->rxfids[i].rx_desc.valid = 1;
2584 ai->rxfids[i].rx_desc.len = PKTSIZE; 2574 ai->rxfids[i].rx_desc.len = PKTSIZE;
2585 ai->rxfids[i].rx_desc.rdy = 0; 2575 ai->rxfids[i].rx_desc.rdy = 0;
@@ -2594,7 +2584,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
2594 ai->txfids[i].card_ram_off = pciaddroff; 2584 ai->txfids[i].card_ram_off = pciaddroff;
2595 ai->txfids[i].virtual_host_addr = vpackoff; 2585 ai->txfids[i].virtual_host_addr = vpackoff;
2596 ai->txfids[i].tx_desc.valid = 1; 2586 ai->txfids[i].tx_desc.valid = 1;
2597 ai->txfids[i].tx_desc.host_addr = (dma_addr_t) busaddroff; 2587 ai->txfids[i].tx_desc.host_addr = busaddroff;
2598 memcpy(ai->txfids[i].virtual_host_addr, 2588 memcpy(ai->txfids[i].virtual_host_addr,
2599 &wifictlhdr8023, sizeof(wifictlhdr8023)); 2589 &wifictlhdr8023, sizeof(wifictlhdr8023));
2600 2590
@@ -2607,8 +2597,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci,
2607 /* Rid descriptor setup */ 2597 /* Rid descriptor setup */
2608 ai->config_desc.card_ram_off = pciaddroff; 2598 ai->config_desc.card_ram_off = pciaddroff;
2609 ai->config_desc.virtual_host_addr = vpackoff; 2599 ai->config_desc.virtual_host_addr = vpackoff;
2610 ai->config_desc.rid_desc.host_addr = (dma_addr_t) busaddroff; 2600 ai->config_desc.rid_desc.host_addr = busaddroff;
2611 ai->ridbus = (dma_addr_t)busaddroff; 2601 ai->ridbus = busaddroff;
2612 ai->config_desc.rid_desc.rid = 0; 2602 ai->config_desc.rid_desc.rid = 0;
2613 ai->config_desc.rid_desc.len = RIDSIZE; 2603 ai->config_desc.rid_desc.len = RIDSIZE;
2614 ai->config_desc.rid_desc.valid = 1; 2604 ai->config_desc.rid_desc.valid = 1;
@@ -2647,9 +2637,7 @@ static void wifi_setup(struct net_device *dev)
2647 dev->get_stats = &airo_get_stats; 2637 dev->get_stats = &airo_get_stats;
2648 dev->set_mac_address = &airo_set_mac_address; 2638 dev->set_mac_address = &airo_set_mac_address;
2649 dev->do_ioctl = &airo_ioctl; 2639 dev->do_ioctl = &airo_ioctl;
2650#ifdef WIRELESS_EXT
2651 dev->wireless_handlers = &airo_handler_def; 2640 dev->wireless_handlers = &airo_handler_def;
2652#endif /* WIRELESS_EXT */
2653 dev->change_mtu = &airo_change_mtu; 2641 dev->change_mtu = &airo_change_mtu;
2654 dev->open = &airo_open; 2642 dev->open = &airo_open;
2655 dev->stop = &airo_close; 2643 dev->stop = &airo_close;
@@ -2675,9 +2663,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
2675 dev->priv = ethdev->priv; 2663 dev->priv = ethdev->priv;
2676 dev->irq = ethdev->irq; 2664 dev->irq = ethdev->irq;
2677 dev->base_addr = ethdev->base_addr; 2665 dev->base_addr = ethdev->base_addr;
2678#ifdef WIRELESS_EXT
2679 dev->wireless_data = ethdev->wireless_data; 2666 dev->wireless_data = ethdev->wireless_data;
2680#endif /* WIRELESS_EXT */
2681 memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len); 2667 memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
2682 err = register_netdev(dev); 2668 err = register_netdev(dev);
2683 if (err<0) { 2669 if (err<0) {
@@ -2755,11 +2741,9 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2755 dev->set_multicast_list = &airo_set_multicast_list; 2741 dev->set_multicast_list = &airo_set_multicast_list;
2756 dev->set_mac_address = &airo_set_mac_address; 2742 dev->set_mac_address = &airo_set_mac_address;
2757 dev->do_ioctl = &airo_ioctl; 2743 dev->do_ioctl = &airo_ioctl;
2758#ifdef WIRELESS_EXT
2759 dev->wireless_handlers = &airo_handler_def; 2744 dev->wireless_handlers = &airo_handler_def;
2760 ai->wireless_data.spy_data = &ai->spy_data; 2745 ai->wireless_data.spy_data = &ai->spy_data;
2761 dev->wireless_data = &ai->wireless_data; 2746 dev->wireless_data = &ai->wireless_data;
2762#endif /* WIRELESS_EXT */
2763 dev->change_mtu = &airo_change_mtu; 2747 dev->change_mtu = &airo_change_mtu;
2764 dev->open = &airo_open; 2748 dev->open = &airo_open;
2765 dev->stop = &airo_close; 2749 dev->stop = &airo_close;
@@ -3637,10 +3621,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3637 int rc; 3621 int rc;
3638 3622
3639 memset( &mySsid, 0, sizeof( mySsid ) ); 3623 memset( &mySsid, 0, sizeof( mySsid ) );
3640 if (ai->flash) { 3624 kfree (ai->flash);
3641 kfree (ai->flash); 3625 ai->flash = NULL;
3642 ai->flash = NULL;
3643 }
3644 3626
3645 /* The NOP is the first step in getting the card going */ 3627 /* The NOP is the first step in getting the card going */
3646 cmd.cmd = NOP; 3628 cmd.cmd = NOP;
@@ -3677,14 +3659,10 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3677 tdsRssiRid rssi_rid; 3659 tdsRssiRid rssi_rid;
3678 CapabilityRid cap_rid; 3660 CapabilityRid cap_rid;
3679 3661
3680 if (ai->APList) { 3662 kfree(ai->APList);
3681 kfree(ai->APList); 3663 ai->APList = NULL;
3682 ai->APList = NULL; 3664 kfree(ai->SSID);
3683 } 3665 ai->SSID = NULL;
3684 if (ai->SSID) {
3685 kfree(ai->SSID);
3686 ai->SSID = NULL;
3687 }
3688 // general configuration (read/modify/write) 3666 // general configuration (read/modify/write)
3689 status = readConfigRid(ai, lock); 3667 status = readConfigRid(ai, lock);
3690 if ( status != SUCCESS ) return ERROR; 3668 if ( status != SUCCESS ) return ERROR;
@@ -3698,10 +3676,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
3698 memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */ 3676 memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
3699 } 3677 }
3700 else { 3678 else {
3701 if (ai->rssi) { 3679 kfree(ai->rssi);
3702 kfree(ai->rssi); 3680 ai->rssi = NULL;
3703 ai->rssi = NULL;
3704 }
3705 if (cap_rid.softCap & 8) 3681 if (cap_rid.softCap & 8)
3706 ai->config.rmode |= RXMODE_NORMALIZED_RSSI; 3682 ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
3707 else 3683 else
@@ -5380,11 +5356,13 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
5380 5356
5381static int proc_close( struct inode *inode, struct file *file ) 5357static int proc_close( struct inode *inode, struct file *file )
5382{ 5358{
5383 struct proc_data *data = (struct proc_data *)file->private_data; 5359 struct proc_data *data = file->private_data;
5384 if ( data->on_close != NULL ) data->on_close( inode, file ); 5360
5385 if ( data->rbuffer ) kfree( data->rbuffer ); 5361 if (data->on_close != NULL)
5386 if ( data->wbuffer ) kfree( data->wbuffer ); 5362 data->on_close(inode, file);
5387 kfree( data ); 5363 kfree(data->rbuffer);
5364 kfree(data->wbuffer);
5365 kfree(data);
5388 return 0; 5366 return 0;
5389} 5367}
5390 5368
@@ -5515,12 +5493,13 @@ static int airo_pci_resume(struct pci_dev *pdev)
5515 struct net_device *dev = pci_get_drvdata(pdev); 5493 struct net_device *dev = pci_get_drvdata(pdev);
5516 struct airo_info *ai = dev->priv; 5494 struct airo_info *ai = dev->priv;
5517 Resp rsp; 5495 Resp rsp;
5496 pci_power_t prev_state = pdev->current_state;
5518 5497
5519 pci_set_power_state(pdev, 0); 5498 pci_set_power_state(pdev, PCI_D0);
5520 pci_restore_state(pdev); 5499 pci_restore_state(pdev);
5521 pci_enable_wake(pdev, pci_choose_state(pdev, ai->power), 0); 5500 pci_enable_wake(pdev, PCI_D0, 0);
5522 5501
5523 if (ai->power.event > 1) { 5502 if (prev_state != PCI_D1) {
5524 reset_card(dev, 0); 5503 reset_card(dev, 0);
5525 mpi_init_descriptors(ai); 5504 mpi_init_descriptors(ai);
5526 setup_card(ai, dev->dev_addr, 0); 5505 setup_card(ai, dev->dev_addr, 0);
@@ -5598,7 +5577,6 @@ static void __exit airo_cleanup_module( void )
5598 remove_proc_entry("aironet", proc_root_driver); 5577 remove_proc_entry("aironet", proc_root_driver);
5599} 5578}
5600 5579
5601#ifdef WIRELESS_EXT
5602/* 5580/*
5603 * Initial Wireless Extension code for Aironet driver by : 5581 * Initial Wireless Extension code for Aironet driver by :
5604 * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00 5582 * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
@@ -7107,8 +7085,6 @@ static const struct iw_handler_def airo_handler_def =
7107 .get_wireless_stats = airo_get_wireless_stats, 7085 .get_wireless_stats = airo_get_wireless_stats,
7108}; 7086};
7109 7087
7110#endif /* WIRELESS_EXT */
7111
7112/* 7088/*
7113 * This defines the configuration part of the Wireless Extensions 7089 * This defines the configuration part of the Wireless Extensions
7114 * Note : irq and spinlock protection will occur in the subroutines 7090 * Note : irq and spinlock protection will occur in the subroutines
@@ -7187,7 +7163,6 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
7187 return rc; 7163 return rc;
7188} 7164}
7189 7165
7190#ifdef WIRELESS_EXT
7191/* 7166/*
7192 * Get the Wireless stats out of the driver 7167 * Get the Wireless stats out of the driver
7193 * Note : irq and spinlock protection will occur in the subroutines 7168 * Note : irq and spinlock protection will occur in the subroutines
@@ -7260,7 +7235,6 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
7260 7235
7261 return &local->wstats; 7236 return &local->wstats;
7262} 7237}
7263#endif /* WIRELESS_EXT */
7264 7238
7265#ifdef CISCO_EXT 7239#ifdef CISCO_EXT
7266/* 7240/*
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index bf25584d68d3..784de9109113 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -258,9 +258,7 @@ static void airo_detach(dev_link_t *link)
258 258
259 /* Unlink device structure, free pieces */ 259 /* Unlink device structure, free pieces */
260 *linkp = link->next; 260 *linkp = link->next;
261 if (link->priv) { 261 kfree(link->priv);
262 kfree(link->priv);
263 }
264 kfree(link); 262 kfree(link);
265 263
266} /* airo_detach */ 264} /* airo_detach */
diff --git a/drivers/net/wireless/airport.c b/drivers/net/wireless/airport.c
index 9d496703c465..7b321f7cf358 100644
--- a/drivers/net/wireless/airport.c
+++ b/drivers/net/wireless/airport.c
@@ -15,28 +15,11 @@
15#define PFX DRIVER_NAME ": " 15#define PFX DRIVER_NAME ": "
16 16
17#include <linux/config.h> 17#include <linux/config.h>
18
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/init.h> 20#include <linux/init.h>
22#include <linux/ptrace.h> 21#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/string.h>
25#include <linux/timer.h>
26#include <linux/ioport.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/etherdevice.h>
30#include <linux/wireless.h>
31
32#include <asm/io.h>
33#include <asm/system.h>
34#include <asm/current.h>
35#include <asm/prom.h>
36#include <asm/machdep.h>
37#include <asm/pmac_feature.h> 22#include <asm/pmac_feature.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40 23
41#include "orinoco.h" 24#include "orinoco.h"
42 25
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 587869d86eee..1fbe027d26b6 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -618,12 +618,12 @@ static int atmel_lock_mac(struct atmel_private *priv);
618static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data); 618static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
619static void atmel_command_irq(struct atmel_private *priv); 619static void atmel_command_irq(struct atmel_private *priv);
620static int atmel_validate_channel(struct atmel_private *priv, int channel); 620static int atmel_validate_channel(struct atmel_private *priv, int channel);
621static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, 621static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
622 u16 frame_len, u8 rssi); 622 u16 frame_len, u8 rssi);
623static void atmel_management_timer(u_long a); 623static void atmel_management_timer(u_long a);
624static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size); 624static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size);
625static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size); 625static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size);
626static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, 626static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
627 u8 *body, int body_len); 627 u8 *body, int body_len);
628 628
629static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index); 629static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
@@ -827,7 +827,7 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
827static int start_tx (struct sk_buff *skb, struct net_device *dev) 827static int start_tx (struct sk_buff *skb, struct net_device *dev)
828{ 828{
829 struct atmel_private *priv = netdev_priv(dev); 829 struct atmel_private *priv = netdev_priv(dev);
830 struct ieee80211_hdr header; 830 struct ieee80211_hdr_4addr header;
831 unsigned long flags; 831 unsigned long flags;
832 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; 832 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
833 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; 833 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
@@ -902,7 +902,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
902} 902}
903 903
904static void atmel_transmit_management_frame(struct atmel_private *priv, 904static void atmel_transmit_management_frame(struct atmel_private *priv,
905 struct ieee80211_hdr *header, 905 struct ieee80211_hdr_4addr *header,
906 u8 *body, int body_len) 906 u8 *body, int body_len)
907{ 907{
908 u16 buff; 908 u16 buff;
@@ -917,7 +917,7 @@ static void atmel_transmit_management_frame(struct atmel_private *priv,
917 tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT); 917 tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
918} 918}
919 919
920static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header, 920static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
921 u16 msdu_size, u16 rx_packet_loc, u32 crc) 921 u16 msdu_size, u16 rx_packet_loc, u32 crc)
922{ 922{
923 /* fast path: unfragmented packet copy directly into skbuf */ 923 /* fast path: unfragmented packet copy directly into skbuf */
@@ -990,7 +990,7 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
990 return (crc ^ 0xffffffff) == netcrc; 990 return (crc ^ 0xffffffff) == netcrc;
991} 991}
992 992
993static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header, 993static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
994 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags) 994 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags)
995{ 995{
996 u8 mac4[6]; 996 u8 mac4[6];
@@ -1082,7 +1082,7 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *heade
1082static void rx_done_irq(struct atmel_private *priv) 1082static void rx_done_irq(struct atmel_private *priv)
1083{ 1083{
1084 int i; 1084 int i;
1085 struct ieee80211_hdr header; 1085 struct ieee80211_hdr_4addr header;
1086 1086
1087 for (i = 0; 1087 for (i = 0;
1088 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID && 1088 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
@@ -1653,8 +1653,7 @@ void stop_atmel_card(struct net_device *dev, int freeres)
1653 unregister_netdev(dev); 1653 unregister_netdev(dev);
1654 remove_proc_entry("driver/atmel", NULL); 1654 remove_proc_entry("driver/atmel", NULL);
1655 free_irq(dev->irq, dev); 1655 free_irq(dev->irq, dev);
1656 if (priv->firmware) 1656 kfree(priv->firmware);
1657 kfree(priv->firmware);
1658 if (freeres) { 1657 if (freeres) {
1659 /* PCMCIA frees this stuff, so only for PCI */ 1658 /* PCMCIA frees this stuff, so only for PCI */
1660 release_region(dev->base_addr, 64); 1659 release_region(dev->base_addr, 64);
@@ -2450,8 +2449,7 @@ static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2450 break; 2449 break;
2451 } 2450 }
2452 2451
2453 if (priv->firmware) 2452 kfree(priv->firmware);
2454 kfree(priv->firmware);
2455 2453
2456 priv->firmware = new_firmware; 2454 priv->firmware = new_firmware;
2457 priv->firmware_length = com.len; 2455 priv->firmware_length = com.len;
@@ -2650,7 +2648,7 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 c
2650 2648
2651static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len) 2649static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len)
2652{ 2650{
2653 struct ieee80211_hdr header; 2651 struct ieee80211_hdr_4addr header;
2654 struct auth_body auth; 2652 struct auth_body auth;
2655 2653
2656 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); 2654 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
@@ -2688,7 +2686,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2688{ 2686{
2689 u8 *ssid_el_p; 2687 u8 *ssid_el_p;
2690 int bodysize; 2688 int bodysize;
2691 struct ieee80211_hdr header; 2689 struct ieee80211_hdr_4addr header;
2692 struct ass_req_format { 2690 struct ass_req_format {
2693 u16 capability; 2691 u16 capability;
2694 u16 listen_interval; 2692 u16 listen_interval;
@@ -2738,7 +2736,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2738 atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize); 2736 atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
2739} 2737}
2740 2738
2741static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr *header) 2739static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr_4addr *header)
2742{ 2740{
2743 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 2741 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
2744 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0; 2742 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
@@ -2788,7 +2786,7 @@ static int retrieve_bss(struct atmel_private *priv)
2788} 2786}
2789 2787
2790 2788
2791static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr *header, 2789static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
2792 u16 capability, u16 beacon_period, u8 channel, u8 rssi, 2790 u16 capability, u16 beacon_period, u8 channel, u8 rssi,
2793 u8 ssid_len, u8 *ssid, int is_beacon) 2791 u8 ssid_len, u8 *ssid, int is_beacon)
2794{ 2792{
@@ -3072,7 +3070,7 @@ static void atmel_smooth_qual(struct atmel_private *priv)
3072} 3070}
3073 3071
3074/* deals with incoming managment frames. */ 3072/* deals with incoming managment frames. */
3075static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, 3073static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr_4addr *header,
3076 u16 frame_len, u8 rssi) 3074 u16 frame_len, u8 rssi)
3077{ 3075{
3078 u16 subtype; 3076 u16 subtype;
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index ff031a3985b3..195cb36619e8 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -259,8 +259,7 @@ static void atmel_detach(dev_link_t *link)
259 259
260 /* Unlink device structure, free pieces */ 260 /* Unlink device structure, free pieces */
261 *linkp = link->next; 261 *linkp = link->next;
262 if (link->priv) 262 kfree(link->priv);
263 kfree(link->priv);
264 kfree(link); 263 kfree(link);
265} 264}
266 265
diff --git a/drivers/net/wireless/hermes.c b/drivers/net/wireless/hermes.c
index 21c3d0d227e6..579480dad374 100644
--- a/drivers/net/wireless/hermes.c
+++ b/drivers/net/wireless/hermes.c
@@ -39,17 +39,10 @@
39 */ 39 */
40 40
41#include <linux/config.h> 41#include <linux/config.h>
42
43#include <linux/module.h> 42#include <linux/module.h>
44#include <linux/types.h>
45#include <linux/threads.h>
46#include <linux/smp.h>
47#include <asm/io.h>
48#include <linux/delay.h>
49#include <linux/init.h>
50#include <linux/kernel.h> 43#include <linux/kernel.h>
51#include <linux/net.h> 44#include <linux/init.h>
52#include <asm/errno.h> 45#include <linux/delay.h>
53 46
54#include "hermes.h" 47#include "hermes.h"
55 48
@@ -451,6 +444,43 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
451 return err; 444 return err;
452} 445}
453 446
447/* Write a block of data to the chip's buffer with padding if
448 * neccessary, via the BAP. Synchronization/serialization is the
449 * caller's problem. len must be even.
450 *
451 * Returns: < 0 on internal failure (errno), 0 on success, > 0 on error from firmware
452 */
453int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf, unsigned data_len, unsigned len,
454 u16 id, u16 offset)
455{
456 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
457 int err = 0;
458
459 if (len < 0 || len % 2 || data_len > len)
460 return -EINVAL;
461
462 err = hermes_bap_seek(hw, bap, id, offset);
463 if (err)
464 goto out;
465
466 /* Transfer all the complete words of data */
467 hermes_write_words(hw, dreg, buf, data_len/2);
468 /* If there is an odd byte left over pad and transfer it */
469 if (data_len & 1) {
470 u8 end[2];
471 end[1] = 0;
472 end[0] = ((unsigned char *)buf)[data_len - 1];
473 hermes_write_words(hw, dreg, end, 1);
474 data_len ++;
475 }
476 /* Now send zeros for the padding */
477 if (data_len < len)
478 hermes_clear_words(hw, dreg, (len - data_len) / 2);
479 /* Complete */
480 out:
481 return err;
482}
483
454/* Read a Length-Type-Value record from the card. 484/* Read a Length-Type-Value record from the card.
455 * 485 *
456 * If length is NULL, we ignore the length read from the card, and 486 * If length is NULL, we ignore the length read from the card, and
@@ -538,6 +568,7 @@ EXPORT_SYMBOL(hermes_allocate);
538 568
539EXPORT_SYMBOL(hermes_bap_pread); 569EXPORT_SYMBOL(hermes_bap_pread);
540EXPORT_SYMBOL(hermes_bap_pwrite); 570EXPORT_SYMBOL(hermes_bap_pwrite);
571EXPORT_SYMBOL(hermes_bap_pwrite_pad);
541EXPORT_SYMBOL(hermes_read_ltv); 572EXPORT_SYMBOL(hermes_read_ltv);
542EXPORT_SYMBOL(hermes_write_ltv); 573EXPORT_SYMBOL(hermes_write_ltv);
543 574
diff --git a/drivers/net/wireless/hermes.h b/drivers/net/wireless/hermes.h
index 8c9e874c9118..a6bd472d75d4 100644
--- a/drivers/net/wireless/hermes.h
+++ b/drivers/net/wireless/hermes.h
@@ -30,9 +30,8 @@
30 * access to the hermes_t structure, and to the hardware 30 * access to the hermes_t structure, and to the hardware
31*/ 31*/
32 32
33#include <linux/delay.h>
34#include <linux/if_ether.h> 33#include <linux/if_ether.h>
35#include <asm/byteorder.h> 34#include <asm/io.h>
36 35
37/* 36/*
38 * Limits and constants 37 * Limits and constants
@@ -192,13 +191,13 @@
192#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */ 191#define HERMES_RXSTAT_WMP (0x6000) /* Wavelan-II Management Protocol frame */
193 192
194struct hermes_tx_descriptor { 193struct hermes_tx_descriptor {
195 u16 status; 194 __le16 status;
196 u16 reserved1; 195 __le16 reserved1;
197 u16 reserved2; 196 __le16 reserved2;
198 u32 sw_support; 197 __le32 sw_support;
199 u8 retry_count; 198 u8 retry_count;
200 u8 tx_rate; 199 u8 tx_rate;
201 u16 tx_control; 200 __le16 tx_control;
202} __attribute__ ((packed)); 201} __attribute__ ((packed));
203 202
204#define HERMES_TXSTAT_RETRYERR (0x0001) 203#define HERMES_TXSTAT_RETRYERR (0x0001)
@@ -222,60 +221,60 @@ struct hermes_tx_descriptor {
222#define HERMES_INQ_SEC_STAT_AGERE (0xF202) 221#define HERMES_INQ_SEC_STAT_AGERE (0xF202)
223 222
224struct hermes_tallies_frame { 223struct hermes_tallies_frame {
225 u16 TxUnicastFrames; 224 __le16 TxUnicastFrames;
226 u16 TxMulticastFrames; 225 __le16 TxMulticastFrames;
227 u16 TxFragments; 226 __le16 TxFragments;
228 u16 TxUnicastOctets; 227 __le16 TxUnicastOctets;
229 u16 TxMulticastOctets; 228 __le16 TxMulticastOctets;
230 u16 TxDeferredTransmissions; 229 __le16 TxDeferredTransmissions;
231 u16 TxSingleRetryFrames; 230 __le16 TxSingleRetryFrames;
232 u16 TxMultipleRetryFrames; 231 __le16 TxMultipleRetryFrames;
233 u16 TxRetryLimitExceeded; 232 __le16 TxRetryLimitExceeded;
234 u16 TxDiscards; 233 __le16 TxDiscards;
235 u16 RxUnicastFrames; 234 __le16 RxUnicastFrames;
236 u16 RxMulticastFrames; 235 __le16 RxMulticastFrames;
237 u16 RxFragments; 236 __le16 RxFragments;
238 u16 RxUnicastOctets; 237 __le16 RxUnicastOctets;
239 u16 RxMulticastOctets; 238 __le16 RxMulticastOctets;
240 u16 RxFCSErrors; 239 __le16 RxFCSErrors;
241 u16 RxDiscards_NoBuffer; 240 __le16 RxDiscards_NoBuffer;
242 u16 TxDiscardsWrongSA; 241 __le16 TxDiscardsWrongSA;
243 u16 RxWEPUndecryptable; 242 __le16 RxWEPUndecryptable;
244 u16 RxMsgInMsgFragments; 243 __le16 RxMsgInMsgFragments;
245 u16 RxMsgInBadMsgFragments; 244 __le16 RxMsgInBadMsgFragments;
246 /* Those last are probably not available in very old firmwares */ 245 /* Those last are probably not available in very old firmwares */
247 u16 RxDiscards_WEPICVError; 246 __le16 RxDiscards_WEPICVError;
248 u16 RxDiscards_WEPExcluded; 247 __le16 RxDiscards_WEPExcluded;
249} __attribute__ ((packed)); 248} __attribute__ ((packed));
250 249
251/* Grabbed from wlan-ng - Thanks Mark... - Jean II 250/* Grabbed from wlan-ng - Thanks Mark... - Jean II
252 * This is the result of a scan inquiry command */ 251 * This is the result of a scan inquiry command */
253/* Structure describing info about an Access Point */ 252/* Structure describing info about an Access Point */
254struct prism2_scan_apinfo { 253struct prism2_scan_apinfo {
255 u16 channel; /* Channel where the AP sits */ 254 __le16 channel; /* Channel where the AP sits */
256 u16 noise; /* Noise level */ 255 __le16 noise; /* Noise level */
257 u16 level; /* Signal level */ 256 __le16 level; /* Signal level */
258 u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */ 257 u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
259 u16 beacon_interv; /* Beacon interval */ 258 __le16 beacon_interv; /* Beacon interval */
260 u16 capabilities; /* Capabilities */ 259 __le16 capabilities; /* Capabilities */
261 u16 essid_len; /* ESSID length */ 260 __le16 essid_len; /* ESSID length */
262 u8 essid[32]; /* ESSID of the network */ 261 u8 essid[32]; /* ESSID of the network */
263 u8 rates[10]; /* Bit rate supported */ 262 u8 rates[10]; /* Bit rate supported */
264 u16 proberesp_rate; /* Data rate of the response frame */ 263 __le16 proberesp_rate; /* Data rate of the response frame */
265 u16 atim; /* ATIM window time, Kus (hostscan only) */ 264 __le16 atim; /* ATIM window time, Kus (hostscan only) */
266} __attribute__ ((packed)); 265} __attribute__ ((packed));
267 266
268/* Same stuff for the Lucent/Agere card. 267/* Same stuff for the Lucent/Agere card.
269 * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */ 268 * Thanks to h1kari <h1kari AT dachb0den.com> - Jean II */
270struct agere_scan_apinfo { 269struct agere_scan_apinfo {
271 u16 channel; /* Channel where the AP sits */ 270 __le16 channel; /* Channel where the AP sits */
272 u16 noise; /* Noise level */ 271 __le16 noise; /* Noise level */
273 u16 level; /* Signal level */ 272 __le16 level; /* Signal level */
274 u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */ 273 u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
275 u16 beacon_interv; /* Beacon interval */ 274 __le16 beacon_interv; /* Beacon interval */
276 u16 capabilities; /* Capabilities */ 275 __le16 capabilities; /* Capabilities */
277 /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ 276 /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
278 u16 essid_len; /* ESSID length */ 277 __le16 essid_len; /* ESSID length */
279 u8 essid[32]; /* ESSID of the network */ 278 u8 essid[32]; /* ESSID of the network */
280} __attribute__ ((packed)); 279} __attribute__ ((packed));
281 280
@@ -283,16 +282,16 @@ struct agere_scan_apinfo {
283struct symbol_scan_apinfo { 282struct symbol_scan_apinfo {
284 u8 channel; /* Channel where the AP sits */ 283 u8 channel; /* Channel where the AP sits */
285 u8 unknown1; /* 8 in 2.9x and 3.9x f/w, 0 otherwise */ 284 u8 unknown1; /* 8 in 2.9x and 3.9x f/w, 0 otherwise */
286 u16 noise; /* Noise level */ 285 __le16 noise; /* Noise level */
287 u16 level; /* Signal level */ 286 __le16 level; /* Signal level */
288 u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */ 287 u8 bssid[ETH_ALEN]; /* MAC address of the Access Point */
289 u16 beacon_interv; /* Beacon interval */ 288 __le16 beacon_interv; /* Beacon interval */
290 u16 capabilities; /* Capabilities */ 289 __le16 capabilities; /* Capabilities */
291 /* bits: 0-ess, 1-ibss, 4-privacy [wep] */ 290 /* bits: 0-ess, 1-ibss, 4-privacy [wep] */
292 u16 essid_len; /* ESSID length */ 291 __le16 essid_len; /* ESSID length */
293 u8 essid[32]; /* ESSID of the network */ 292 u8 essid[32]; /* ESSID of the network */
294 u16 rates[5]; /* Bit rate supported */ 293 __le16 rates[5]; /* Bit rate supported */
295 u16 basic_rates; /* Basic rates bitmask */ 294 __le16 basic_rates; /* Basic rates bitmask */
296 u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */ 295 u8 unknown2[6]; /* Always FF:FF:FF:FF:00:00 */
297 u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */ 296 u8 unknown3[8]; /* Always 0, appeared in f/w 3.91-68 */
298} __attribute__ ((packed)); 297} __attribute__ ((packed));
@@ -312,7 +311,7 @@ union hermes_scan_info {
312#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006) 311#define HERMES_LINKSTATUS_ASSOC_FAILED (0x0006)
313 312
314struct hermes_linkstatus { 313struct hermes_linkstatus {
315 u16 linkstatus; /* Link status */ 314 __le16 linkstatus; /* Link status */
316} __attribute__ ((packed)); 315} __attribute__ ((packed));
317 316
318struct hermes_response { 317struct hermes_response {
@@ -321,8 +320,8 @@ struct hermes_response {
321 320
322/* "ID" structure - used for ESSID and station nickname */ 321/* "ID" structure - used for ESSID and station nickname */
323struct hermes_idstring { 322struct hermes_idstring {
324 u16 len; 323 __le16 len;
325 u16 val[16]; 324 __le16 val[16];
326} __attribute__ ((packed)); 325} __attribute__ ((packed));
327 326
328struct hermes_multicast { 327struct hermes_multicast {
@@ -377,6 +376,8 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, unsigned len,
377 u16 id, u16 offset); 376 u16 id, u16 offset);
378int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len, 377int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, unsigned len,
379 u16 id, u16 offset); 378 u16 id, u16 offset);
379int hermes_bap_pwrite_pad(hermes_t *hw, int bap, const void *buf,
380 unsigned data_len, unsigned len, u16 id, u16 offset);
380int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen, 381int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
381 u16 *length, void *buf); 382 u16 *length, void *buf);
382int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, 383int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
@@ -447,7 +448,7 @@ static inline void hermes_clear_words(struct hermes *hw, int off, unsigned count
447 448
448static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word) 449static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
449{ 450{
450 u16 rec; 451 __le16 rec;
451 int err; 452 int err;
452 453
453 err = HERMES_READ_RECORD(hw, bap, rid, &rec); 454 err = HERMES_READ_RECORD(hw, bap, rid, &rec);
@@ -457,7 +458,7 @@ static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
457 458
458static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word) 459static inline int hermes_write_wordrec(hermes_t *hw, int bap, u16 rid, u16 word)
459{ 460{
460 u16 rec = cpu_to_le16(word); 461 __le16 rec = cpu_to_le16(word);
461 return HERMES_WRITE_RECORD(hw, bap, rid, &rec); 462 return HERMES_WRITE_RECORD(hw, bap, rid, &rec);
462} 463}
463 464
diff --git a/drivers/net/wireless/hostap/hostap.c b/drivers/net/wireless/hostap/hostap.c
index e7f5821b4942..6a96cd9f2685 100644
--- a/drivers/net/wireless/hostap/hostap.c
+++ b/drivers/net/wireless/hostap/hostap.c
@@ -716,9 +716,6 @@ static int prism2_close(struct net_device *dev)
716 hostap_deauth_all_stas(dev, local->ap, 1); 716 hostap_deauth_all_stas(dev, local->ap, 1);
717#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 717#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
718 718
719 if (local->func->dev_close && local->func->dev_close(local))
720 return 0;
721
722 if (dev == local->dev) { 719 if (dev == local->dev) {
723 local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL); 720 local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL);
724 } 721 }
@@ -766,9 +763,6 @@ static int prism2_open(struct net_device *dev)
766 local->hw_downloading) 763 local->hw_downloading)
767 return -ENODEV; 764 return -ENODEV;
768 765
769 if (local->func->dev_open && local->func->dev_open(local))
770 return 1;
771
772 if (!try_module_get(local->hw_module)) 766 if (!try_module_get(local->hw_module))
773 return -ENODEV; 767 return -ENODEV;
774 local->num_dev_open++; 768 local->num_dev_open++;
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index b0501243b175..ffac50899454 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -6,10 +6,10 @@
6void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, 6void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
7 struct hostap_80211_rx_status *rx_stats) 7 struct hostap_80211_rx_status *rx_stats)
8{ 8{
9 struct ieee80211_hdr *hdr; 9 struct ieee80211_hdr_4addr *hdr;
10 u16 fc; 10 u16 fc;
11 11
12 hdr = (struct ieee80211_hdr *) skb->data; 12 hdr = (struct ieee80211_hdr_4addr *) skb->data;
13 13
14 printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d " 14 printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d "
15 "jiffies=%ld\n", 15 "jiffies=%ld\n",
@@ -51,7 +51,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
51 int hdrlen, phdrlen, head_need, tail_need; 51 int hdrlen, phdrlen, head_need, tail_need;
52 u16 fc; 52 u16 fc;
53 int prism_header, ret; 53 int prism_header, ret;
54 struct ieee80211_hdr *hdr; 54 struct ieee80211_hdr_4addr *hdr;
55 55
56 iface = netdev_priv(dev); 56 iface = netdev_priv(dev);
57 local = iface->local; 57 local = iface->local;
@@ -70,7 +70,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
70 phdrlen = 0; 70 phdrlen = 0;
71 } 71 }
72 72
73 hdr = (struct ieee80211_hdr *) skb->data; 73 hdr = (struct ieee80211_hdr_4addr *) skb->data;
74 fc = le16_to_cpu(hdr->frame_ctl); 74 fc = le16_to_cpu(hdr->frame_ctl);
75 75
76 if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) { 76 if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) {
@@ -215,7 +215,7 @@ prism2_frag_cache_find(local_info_t *local, unsigned int seq,
215 215
216/* Called only as a tasklet (software IRQ) */ 216/* Called only as a tasklet (software IRQ) */
217static struct sk_buff * 217static struct sk_buff *
218prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr) 218prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr_4addr *hdr)
219{ 219{
220 struct sk_buff *skb = NULL; 220 struct sk_buff *skb = NULL;
221 u16 sc; 221 u16 sc;
@@ -229,7 +229,7 @@ prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
229 if (frag == 0) { 229 if (frag == 0) {
230 /* Reserve enough space to fit maximum frame length */ 230 /* Reserve enough space to fit maximum frame length */
231 skb = dev_alloc_skb(local->dev->mtu + 231 skb = dev_alloc_skb(local->dev->mtu +
232 sizeof(struct ieee80211_hdr) + 232 sizeof(struct ieee80211_hdr_4addr) +
233 8 /* LLC */ + 233 8 /* LLC */ +
234 2 /* alignment */ + 234 2 /* alignment */ +
235 8 /* WEP */ + ETH_ALEN /* WDS */); 235 8 /* WEP */ + ETH_ALEN /* WDS */);
@@ -267,7 +267,7 @@ prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
267 267
268/* Called only as a tasklet (software IRQ) */ 268/* Called only as a tasklet (software IRQ) */
269static int prism2_frag_cache_invalidate(local_info_t *local, 269static int prism2_frag_cache_invalidate(local_info_t *local,
270 struct ieee80211_hdr *hdr) 270 struct ieee80211_hdr_4addr *hdr)
271{ 271{
272 u16 sc; 272 u16 sc;
273 unsigned int seq; 273 unsigned int seq;
@@ -441,7 +441,7 @@ hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
441 u16 stype) 441 u16 stype)
442{ 442{
443 if (local->iw_mode == IW_MODE_MASTER) { 443 if (local->iw_mode == IW_MODE_MASTER) {
444 hostap_update_sta_ps(local, (struct ieee80211_hdr *) 444 hostap_update_sta_ps(local, (struct ieee80211_hdr_4addr *)
445 skb->data); 445 skb->data);
446 } 446 }
447 447
@@ -520,7 +520,7 @@ static inline struct net_device *prism2_rx_get_wds(local_info_t *local,
520 520
521 521
522static inline int 522static inline int
523hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, 523hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
524 u16 fc, struct net_device **wds) 524 u16 fc, struct net_device **wds)
525{ 525{
526 /* FIX: is this really supposed to accept WDS frames only in Master 526 /* FIX: is this really supposed to accept WDS frames only in Master
@@ -579,13 +579,13 @@ static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
579{ 579{
580 struct net_device *dev = local->dev; 580 struct net_device *dev = local->dev;
581 u16 fc, ethertype; 581 u16 fc, ethertype;
582 struct ieee80211_hdr *hdr; 582 struct ieee80211_hdr_4addr *hdr;
583 u8 *pos; 583 u8 *pos;
584 584
585 if (skb->len < 24) 585 if (skb->len < 24)
586 return 0; 586 return 0;
587 587
588 hdr = (struct ieee80211_hdr *) skb->data; 588 hdr = (struct ieee80211_hdr_4addr *) skb->data;
589 fc = le16_to_cpu(hdr->frame_ctl); 589 fc = le16_to_cpu(hdr->frame_ctl);
590 590
591 /* check that the frame is unicast frame to us */ 591 /* check that the frame is unicast frame to us */
@@ -619,13 +619,13 @@ static inline int
619hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, 619hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
620 struct ieee80211_crypt_data *crypt) 620 struct ieee80211_crypt_data *crypt)
621{ 621{
622 struct ieee80211_hdr *hdr; 622 struct ieee80211_hdr_4addr *hdr;
623 int res, hdrlen; 623 int res, hdrlen;
624 624
625 if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) 625 if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
626 return 0; 626 return 0;
627 627
628 hdr = (struct ieee80211_hdr *) skb->data; 628 hdr = (struct ieee80211_hdr_4addr *) skb->data;
629 hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 629 hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
630 630
631 if (local->tkip_countermeasures && 631 if (local->tkip_countermeasures &&
@@ -658,13 +658,13 @@ static inline int
658hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, 658hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
659 int keyidx, struct ieee80211_crypt_data *crypt) 659 int keyidx, struct ieee80211_crypt_data *crypt)
660{ 660{
661 struct ieee80211_hdr *hdr; 661 struct ieee80211_hdr_4addr *hdr;
662 int res, hdrlen; 662 int res, hdrlen;
663 663
664 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) 664 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
665 return 0; 665 return 0;
666 666
667 hdr = (struct ieee80211_hdr *) skb->data; 667 hdr = (struct ieee80211_hdr_4addr *) skb->data;
668 hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); 668 hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
669 669
670 atomic_inc(&crypt->refcnt); 670 atomic_inc(&crypt->refcnt);
@@ -689,7 +689,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
689{ 689{
690 struct hostap_interface *iface; 690 struct hostap_interface *iface;
691 local_info_t *local; 691 local_info_t *local;
692 struct ieee80211_hdr *hdr; 692 struct ieee80211_hdr_4addr *hdr;
693 size_t hdrlen; 693 size_t hdrlen;
694 u16 fc, type, stype, sc; 694 u16 fc, type, stype, sc;
695 struct net_device *wds = NULL; 695 struct net_device *wds = NULL;
@@ -716,7 +716,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
716 dev = local->ddev; 716 dev = local->ddev;
717 iface = netdev_priv(dev); 717 iface = netdev_priv(dev);
718 718
719 hdr = (struct ieee80211_hdr *) skb->data; 719 hdr = (struct ieee80211_hdr_4addr *) skb->data;
720 stats = hostap_get_stats(dev); 720 stats = hostap_get_stats(dev);
721 721
722 if (skb->len < 10) 722 if (skb->len < 10)
@@ -737,7 +737,8 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
737 struct iw_quality wstats; 737 struct iw_quality wstats;
738 wstats.level = rx_stats->signal; 738 wstats.level = rx_stats->signal;
739 wstats.noise = rx_stats->noise; 739 wstats.noise = rx_stats->noise;
740 wstats.updated = 6; /* No qual value */ 740 wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED
741 | IW_QUAL_QUAL_INVALID | IW_QUAL_DBM;
741 /* Update spy records */ 742 /* Update spy records */
742 wireless_spy_update(dev, hdr->addr2, &wstats); 743 wireless_spy_update(dev, hdr->addr2, &wstats);
743 } 744 }
@@ -889,7 +890,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
889 if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && 890 if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) &&
890 (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0) 891 (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
891 goto rx_dropped; 892 goto rx_dropped;
892 hdr = (struct ieee80211_hdr *) skb->data; 893 hdr = (struct ieee80211_hdr_4addr *) skb->data;
893 894
894 /* skb: hdr + (possibly fragmented) plaintext payload */ 895 /* skb: hdr + (possibly fragmented) plaintext payload */
895 896
@@ -941,7 +942,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
941 /* this was the last fragment and the frame will be 942 /* this was the last fragment and the frame will be
942 * delivered, so remove skb from fragment cache */ 943 * delivered, so remove skb from fragment cache */
943 skb = frag_skb; 944 skb = frag_skb;
944 hdr = (struct ieee80211_hdr *) skb->data; 945 hdr = (struct ieee80211_hdr_4addr *) skb->data;
945 prism2_frag_cache_invalidate(local, hdr); 946 prism2_frag_cache_invalidate(local, hdr);
946 } 947 }
947 948
@@ -952,7 +953,7 @@ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
952 hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt)) 953 hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt))
953 goto rx_dropped; 954 goto rx_dropped;
954 955
955 hdr = (struct ieee80211_hdr *) skb->data; 956 hdr = (struct ieee80211_hdr_4addr *) skb->data;
956 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) { 957 if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) {
957 if (local->ieee_802_1x && 958 if (local->ieee_802_1x &&
958 hostap_is_eapol_frame(local, skb)) { 959 hostap_is_eapol_frame(local, skb)) {
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
index 6358015f6526..9d24f8a38ac5 100644
--- a/drivers/net/wireless/hostap/hostap_80211_tx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -1,9 +1,9 @@
1void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) 1void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
2{ 2{
3 struct ieee80211_hdr *hdr; 3 struct ieee80211_hdr_4addr *hdr;
4 u16 fc; 4 u16 fc;
5 5
6 hdr = (struct ieee80211_hdr *) skb->data; 6 hdr = (struct ieee80211_hdr_4addr *) skb->data;
7 7
8 printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n", 8 printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n",
9 name, skb->len, jiffies); 9 name, skb->len, jiffies);
@@ -41,7 +41,7 @@ int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
41 struct hostap_interface *iface; 41 struct hostap_interface *iface;
42 local_info_t *local; 42 local_info_t *local;
43 int need_headroom, need_tailroom = 0; 43 int need_headroom, need_tailroom = 0;
44 struct ieee80211_hdr hdr; 44 struct ieee80211_hdr_4addr hdr;
45 u16 fc, ethertype = 0; 45 u16 fc, ethertype = 0;
46 enum { 46 enum {
47 WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME 47 WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
@@ -244,7 +244,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
244 struct hostap_interface *iface; 244 struct hostap_interface *iface;
245 local_info_t *local; 245 local_info_t *local;
246 struct hostap_skb_tx_data *meta; 246 struct hostap_skb_tx_data *meta;
247 struct ieee80211_hdr *hdr; 247 struct ieee80211_hdr_4addr *hdr;
248 u16 fc; 248 u16 fc;
249 249
250 iface = netdev_priv(dev); 250 iface = netdev_priv(dev);
@@ -266,7 +266,7 @@ int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
266 meta->iface = iface; 266 meta->iface = iface;
267 267
268 if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) { 268 if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
269 hdr = (struct ieee80211_hdr *) skb->data; 269 hdr = (struct ieee80211_hdr_4addr *) skb->data;
270 fc = le16_to_cpu(hdr->frame_ctl); 270 fc = le16_to_cpu(hdr->frame_ctl);
271 if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA && 271 if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
272 WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_DATA) { 272 WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_DATA) {
@@ -289,7 +289,7 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
289{ 289{
290 struct hostap_interface *iface; 290 struct hostap_interface *iface;
291 local_info_t *local; 291 local_info_t *local;
292 struct ieee80211_hdr *hdr; 292 struct ieee80211_hdr_4addr *hdr;
293 u16 fc; 293 u16 fc;
294 int hdr_len, res; 294 int hdr_len, res;
295 295
@@ -303,7 +303,7 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
303 303
304 if (local->tkip_countermeasures && 304 if (local->tkip_countermeasures &&
305 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) { 305 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
306 hdr = (struct ieee80211_hdr *) skb->data; 306 hdr = (struct ieee80211_hdr_4addr *) skb->data;
307 if (net_ratelimit()) { 307 if (net_ratelimit()) {
308 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " 308 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
309 "TX packet to " MACSTR "\n", 309 "TX packet to " MACSTR "\n",
@@ -317,15 +317,15 @@ struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
317 if (skb == NULL) 317 if (skb == NULL)
318 return NULL; 318 return NULL;
319 319
320 if ((skb_headroom(skb) < crypt->ops->extra_prefix_len || 320 if ((skb_headroom(skb) < crypt->ops->extra_mpdu_prefix_len ||
321 skb_tailroom(skb) < crypt->ops->extra_postfix_len) && 321 skb_tailroom(skb) < crypt->ops->extra_mpdu_postfix_len) &&
322 pskb_expand_head(skb, crypt->ops->extra_prefix_len, 322 pskb_expand_head(skb, crypt->ops->extra_mpdu_prefix_len,
323 crypt->ops->extra_postfix_len, GFP_ATOMIC)) { 323 crypt->ops->extra_mpdu_postfix_len, GFP_ATOMIC)) {
324 kfree_skb(skb); 324 kfree_skb(skb);
325 return NULL; 325 return NULL;
326 } 326 }
327 327
328 hdr = (struct ieee80211_hdr *) skb->data; 328 hdr = (struct ieee80211_hdr_4addr *) skb->data;
329 fc = le16_to_cpu(hdr->frame_ctl); 329 fc = le16_to_cpu(hdr->frame_ctl);
330 hdr_len = hostap_80211_get_hdrlen(fc); 330 hdr_len = hostap_80211_get_hdrlen(fc);
331 331
@@ -360,7 +360,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
360 ap_tx_ret tx_ret; 360 ap_tx_ret tx_ret;
361 struct hostap_skb_tx_data *meta; 361 struct hostap_skb_tx_data *meta;
362 int no_encrypt = 0; 362 int no_encrypt = 0;
363 struct ieee80211_hdr *hdr; 363 struct ieee80211_hdr_4addr *hdr;
364 364
365 iface = netdev_priv(dev); 365 iface = netdev_priv(dev);
366 local = iface->local; 366 local = iface->local;
@@ -403,7 +403,7 @@ int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
403 tx_ret = hostap_handle_sta_tx(local, &tx); 403 tx_ret = hostap_handle_sta_tx(local, &tx);
404 skb = tx.skb; 404 skb = tx.skb;
405 meta = (struct hostap_skb_tx_data *) skb->cb; 405 meta = (struct hostap_skb_tx_data *) skb->cb;
406 hdr = (struct ieee80211_hdr *) skb->data; 406 hdr = (struct ieee80211_hdr_4addr *) skb->data;
407 fc = le16_to_cpu(hdr->frame_ctl); 407 fc = le16_to_cpu(hdr->frame_ctl);
408 switch (tx_ret) { 408 switch (tx_ret) {
409 case AP_TX_CONTINUE: 409 case AP_TX_CONTINUE:
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 930cef8367f2..9da94ab7f05f 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -591,14 +591,14 @@ static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
591{ 591{
592 struct ap_data *ap = data; 592 struct ap_data *ap = data;
593 u16 fc; 593 u16 fc;
594 struct ieee80211_hdr *hdr; 594 struct ieee80211_hdr_4addr *hdr;
595 595
596 if (!ap->local->hostapd || !ap->local->apdev) { 596 if (!ap->local->hostapd || !ap->local->apdev) {
597 dev_kfree_skb(skb); 597 dev_kfree_skb(skb);
598 return; 598 return;
599 } 599 }
600 600
601 hdr = (struct ieee80211_hdr *) skb->data; 601 hdr = (struct ieee80211_hdr_4addr *) skb->data;
602 fc = le16_to_cpu(hdr->frame_ctl); 602 fc = le16_to_cpu(hdr->frame_ctl);
603 603
604 /* Pass the TX callback frame to the hostapd; use 802.11 header version 604 /* Pass the TX callback frame to the hostapd; use 802.11 header version
@@ -623,7 +623,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
623{ 623{
624 struct ap_data *ap = data; 624 struct ap_data *ap = data;
625 struct net_device *dev = ap->local->dev; 625 struct net_device *dev = ap->local->dev;
626 struct ieee80211_hdr *hdr; 626 struct ieee80211_hdr_4addr *hdr;
627 u16 fc, *pos, auth_alg, auth_transaction, status; 627 u16 fc, *pos, auth_alg, auth_transaction, status;
628 struct sta_info *sta = NULL; 628 struct sta_info *sta = NULL;
629 char *txt = NULL; 629 char *txt = NULL;
@@ -633,7 +633,7 @@ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
633 return; 633 return;
634 } 634 }
635 635
636 hdr = (struct ieee80211_hdr *) skb->data; 636 hdr = (struct ieee80211_hdr_4addr *) skb->data;
637 fc = le16_to_cpu(hdr->frame_ctl); 637 fc = le16_to_cpu(hdr->frame_ctl);
638 if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT || 638 if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
639 WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_AUTH || 639 WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_AUTH ||
@@ -692,7 +692,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
692{ 692{
693 struct ap_data *ap = data; 693 struct ap_data *ap = data;
694 struct net_device *dev = ap->local->dev; 694 struct net_device *dev = ap->local->dev;
695 struct ieee80211_hdr *hdr; 695 struct ieee80211_hdr_4addr *hdr;
696 u16 fc, *pos, status; 696 u16 fc, *pos, status;
697 struct sta_info *sta = NULL; 697 struct sta_info *sta = NULL;
698 char *txt = NULL; 698 char *txt = NULL;
@@ -702,7 +702,7 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
702 return; 702 return;
703 } 703 }
704 704
705 hdr = (struct ieee80211_hdr *) skb->data; 705 hdr = (struct ieee80211_hdr_4addr *) skb->data;
706 fc = le16_to_cpu(hdr->frame_ctl); 706 fc = le16_to_cpu(hdr->frame_ctl);
707 if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT || 707 if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
708 (WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_ASSOC_RESP && 708 (WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_ASSOC_RESP &&
@@ -757,12 +757,12 @@ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
757static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data) 757static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
758{ 758{
759 struct ap_data *ap = data; 759 struct ap_data *ap = data;
760 struct ieee80211_hdr *hdr; 760 struct ieee80211_hdr_4addr *hdr;
761 struct sta_info *sta; 761 struct sta_info *sta;
762 762
763 if (skb->len < 24) 763 if (skb->len < 24)
764 goto fail; 764 goto fail;
765 hdr = (struct ieee80211_hdr *) skb->data; 765 hdr = (struct ieee80211_hdr_4addr *) skb->data;
766 if (ok) { 766 if (ok) {
767 spin_lock(&ap->sta_table_lock); 767 spin_lock(&ap->sta_table_lock);
768 sta = ap_get_sta(ap, hdr->addr1); 768 sta = ap_get_sta(ap, hdr->addr1);
@@ -918,7 +918,7 @@ static void prism2_send_mgmt(struct net_device *dev,
918{ 918{
919 struct hostap_interface *iface; 919 struct hostap_interface *iface;
920 local_info_t *local; 920 local_info_t *local;
921 struct ieee80211_hdr *hdr; 921 struct ieee80211_hdr_4addr *hdr;
922 u16 fc; 922 u16 fc;
923 struct sk_buff *skb; 923 struct sk_buff *skb;
924 struct hostap_skb_tx_data *meta; 924 struct hostap_skb_tx_data *meta;
@@ -944,7 +944,7 @@ static void prism2_send_mgmt(struct net_device *dev,
944 944
945 fc = type_subtype; 945 fc = type_subtype;
946 hdrlen = hostap_80211_get_hdrlen(fc); 946 hdrlen = hostap_80211_get_hdrlen(fc);
947 hdr = (struct ieee80211_hdr *) skb_put(skb, hdrlen); 947 hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, hdrlen);
948 if (body) 948 if (body)
949 memcpy(skb_put(skb, body_len), body, body_len); 949 memcpy(skb_put(skb, body_len), body, body_len);
950 950
@@ -1256,14 +1256,14 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
1256 } 1256 }
1257 1257
1258 skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN + 1258 skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN +
1259 ap->crypt->extra_prefix_len + 1259 ap->crypt->extra_mpdu_prefix_len +
1260 ap->crypt->extra_postfix_len); 1260 ap->crypt->extra_mpdu_postfix_len);
1261 if (skb == NULL) { 1261 if (skb == NULL) {
1262 kfree(tmpbuf); 1262 kfree(tmpbuf);
1263 return NULL; 1263 return NULL;
1264 } 1264 }
1265 1265
1266 skb_reserve(skb, ap->crypt->extra_prefix_len); 1266 skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len);
1267 memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0, 1267 memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0,
1268 WLAN_AUTH_CHALLENGE_LEN); 1268 WLAN_AUTH_CHALLENGE_LEN);
1269 if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) { 1269 if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
@@ -1272,7 +1272,7 @@ static char * ap_auth_make_challenge(struct ap_data *ap)
1272 return NULL; 1272 return NULL;
1273 } 1273 }
1274 1274
1275 memcpy(tmpbuf, skb->data + ap->crypt->extra_prefix_len, 1275 memcpy(tmpbuf, skb->data + ap->crypt->extra_mpdu_prefix_len,
1276 WLAN_AUTH_CHALLENGE_LEN); 1276 WLAN_AUTH_CHALLENGE_LEN);
1277 dev_kfree_skb(skb); 1277 dev_kfree_skb(skb);
1278 1278
@@ -1285,7 +1285,7 @@ static void handle_authen(local_info_t *local, struct sk_buff *skb,
1285 struct hostap_80211_rx_status *rx_stats) 1285 struct hostap_80211_rx_status *rx_stats)
1286{ 1286{
1287 struct net_device *dev = local->dev; 1287 struct net_device *dev = local->dev;
1288 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1288 struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
1289 size_t hdrlen; 1289 size_t hdrlen;
1290 struct ap_data *ap = local->ap; 1290 struct ap_data *ap = local->ap;
1291 char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL; 1291 char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL;
@@ -1498,7 +1498,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
1498 struct hostap_80211_rx_status *rx_stats, int reassoc) 1498 struct hostap_80211_rx_status *rx_stats, int reassoc)
1499{ 1499{
1500 struct net_device *dev = local->dev; 1500 struct net_device *dev = local->dev;
1501 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1501 struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
1502 char body[12], *p, *lpos; 1502 char body[12], *p, *lpos;
1503 int len, left; 1503 int len, left;
1504 u16 *pos; 1504 u16 *pos;
@@ -1705,7 +1705,7 @@ static void handle_deauth(local_info_t *local, struct sk_buff *skb,
1705 struct hostap_80211_rx_status *rx_stats) 1705 struct hostap_80211_rx_status *rx_stats)
1706{ 1706{
1707 struct net_device *dev = local->dev; 1707 struct net_device *dev = local->dev;
1708 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1708 struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
1709 char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN); 1709 char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN);
1710 int len; 1710 int len;
1711 u16 reason_code, *pos; 1711 u16 reason_code, *pos;
@@ -1746,7 +1746,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
1746 struct hostap_80211_rx_status *rx_stats) 1746 struct hostap_80211_rx_status *rx_stats)
1747{ 1747{
1748 struct net_device *dev = local->dev; 1748 struct net_device *dev = local->dev;
1749 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1749 struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
1750 char *body = skb->data + IEEE80211_MGMT_HDR_LEN; 1750 char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
1751 int len; 1751 int len;
1752 u16 reason_code, *pos; 1752 u16 reason_code, *pos;
@@ -1784,7 +1784,7 @@ static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
1784 1784
1785/* Called only as a scheduled task for pending AP frames. */ 1785/* Called only as a scheduled task for pending AP frames. */
1786static void ap_handle_data_nullfunc(local_info_t *local, 1786static void ap_handle_data_nullfunc(local_info_t *local,
1787 struct ieee80211_hdr *hdr) 1787 struct ieee80211_hdr_4addr *hdr)
1788{ 1788{
1789 struct net_device *dev = local->dev; 1789 struct net_device *dev = local->dev;
1790 1790
@@ -1801,7 +1801,7 @@ static void ap_handle_data_nullfunc(local_info_t *local,
1801 1801
1802/* Called only as a scheduled task for pending AP frames. */ 1802/* Called only as a scheduled task for pending AP frames. */
1803static void ap_handle_dropped_data(local_info_t *local, 1803static void ap_handle_dropped_data(local_info_t *local,
1804 struct ieee80211_hdr *hdr) 1804 struct ieee80211_hdr_4addr *hdr)
1805{ 1805{
1806 struct net_device *dev = local->dev; 1806 struct net_device *dev = local->dev;
1807 struct sta_info *sta; 1807 struct sta_info *sta;
@@ -1860,7 +1860,7 @@ static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta,
1860 1860
1861/* Called only as a scheduled task for pending AP frames. */ 1861/* Called only as a scheduled task for pending AP frames. */
1862static void handle_pspoll(local_info_t *local, 1862static void handle_pspoll(local_info_t *local,
1863 struct ieee80211_hdr *hdr, 1863 struct ieee80211_hdr_4addr *hdr,
1864 struct hostap_80211_rx_status *rx_stats) 1864 struct hostap_80211_rx_status *rx_stats)
1865{ 1865{
1866 struct net_device *dev = local->dev; 1866 struct net_device *dev = local->dev;
@@ -1979,7 +1979,7 @@ static void handle_wds_oper_queue(void *data)
1979static void handle_beacon(local_info_t *local, struct sk_buff *skb, 1979static void handle_beacon(local_info_t *local, struct sk_buff *skb,
1980 struct hostap_80211_rx_status *rx_stats) 1980 struct hostap_80211_rx_status *rx_stats)
1981{ 1981{
1982 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1982 struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *) skb->data;
1983 char *body = skb->data + IEEE80211_MGMT_HDR_LEN; 1983 char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
1984 int len, left; 1984 int len, left;
1985 u16 *pos, beacon_int, capability; 1985 u16 *pos, beacon_int, capability;
@@ -2137,11 +2137,11 @@ static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
2137 struct net_device *dev = local->dev; 2137 struct net_device *dev = local->dev;
2138#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 2138#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
2139 u16 fc, type, stype; 2139 u16 fc, type, stype;
2140 struct ieee80211_hdr *hdr; 2140 struct ieee80211_hdr_4addr *hdr;
2141 2141
2142 /* FIX: should give skb->len to handler functions and check that the 2142 /* FIX: should give skb->len to handler functions and check that the
2143 * buffer is long enough */ 2143 * buffer is long enough */
2144 hdr = (struct ieee80211_hdr *) skb->data; 2144 hdr = (struct ieee80211_hdr_4addr *) skb->data;
2145 fc = le16_to_cpu(hdr->frame_ctl); 2145 fc = le16_to_cpu(hdr->frame_ctl);
2146 type = WLAN_FC_GET_TYPE(fc); 2146 type = WLAN_FC_GET_TYPE(fc);
2147 stype = WLAN_FC_GET_STYPE(fc); 2147 stype = WLAN_FC_GET_STYPE(fc);
@@ -2258,7 +2258,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
2258 struct hostap_interface *iface; 2258 struct hostap_interface *iface;
2259 local_info_t *local; 2259 local_info_t *local;
2260 u16 fc; 2260 u16 fc;
2261 struct ieee80211_hdr *hdr; 2261 struct ieee80211_hdr_4addr *hdr;
2262 2262
2263 iface = netdev_priv(dev); 2263 iface = netdev_priv(dev);
2264 local = iface->local; 2264 local = iface->local;
@@ -2268,7 +2268,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
2268 2268
2269 local->stats.rx_packets++; 2269 local->stats.rx_packets++;
2270 2270
2271 hdr = (struct ieee80211_hdr *) skb->data; 2271 hdr = (struct ieee80211_hdr_4addr *) skb->data;
2272 fc = le16_to_cpu(hdr->frame_ctl); 2272 fc = le16_to_cpu(hdr->frame_ctl);
2273 2273
2274 if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL && 2274 if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL &&
@@ -2289,7 +2289,7 @@ void hostap_rx(struct net_device *dev, struct sk_buff *skb,
2289static void schedule_packet_send(local_info_t *local, struct sta_info *sta) 2289static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
2290{ 2290{
2291 struct sk_buff *skb; 2291 struct sk_buff *skb;
2292 struct ieee80211_hdr *hdr; 2292 struct ieee80211_hdr_4addr *hdr;
2293 struct hostap_80211_rx_status rx_stats; 2293 struct hostap_80211_rx_status rx_stats;
2294 2294
2295 if (skb_queue_empty(&sta->tx_buf)) 2295 if (skb_queue_empty(&sta->tx_buf))
@@ -2302,7 +2302,7 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
2302 return; 2302 return;
2303 } 2303 }
2304 2304
2305 hdr = (struct ieee80211_hdr *) skb_put(skb, 16); 2305 hdr = (struct ieee80211_hdr_4addr *) skb_put(skb, 16);
2306 2306
2307 /* Generate a fake pspoll frame to start packet delivery */ 2307 /* Generate a fake pspoll frame to start packet delivery */
2308 hdr->frame_ctl = __constant_cpu_to_le16( 2308 hdr->frame_ctl = __constant_cpu_to_le16(
@@ -2349,7 +2349,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
2349 qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence); 2349 qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
2350 qual[count].updated = sta->last_rx_updated; 2350 qual[count].updated = sta->last_rx_updated;
2351 2351
2352 sta->last_rx_updated = 0; 2352 sta->last_rx_updated = IW_QUAL_DBM;
2353 2353
2354 count++; 2354 count++;
2355 if (count >= buf_size) 2355 if (count >= buf_size)
@@ -2467,7 +2467,7 @@ static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
2467 } 2467 }
2468#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 2468#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
2469 2469
2470 sta->last_rx_updated = 0; 2470 sta->last_rx_updated = IW_QUAL_DBM;
2471 2471
2472 /* To be continued, we should make good use of IWEVCUSTOM */ 2472 /* To be continued, we should make good use of IWEVCUSTOM */
2473 } 2473 }
@@ -2685,7 +2685,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
2685 struct sta_info *sta = NULL; 2685 struct sta_info *sta = NULL;
2686 struct sk_buff *skb = tx->skb; 2686 struct sk_buff *skb = tx->skb;
2687 int set_tim, ret; 2687 int set_tim, ret;
2688 struct ieee80211_hdr *hdr; 2688 struct ieee80211_hdr_4addr *hdr;
2689 struct hostap_skb_tx_data *meta; 2689 struct hostap_skb_tx_data *meta;
2690 2690
2691 meta = (struct hostap_skb_tx_data *) skb->cb; 2691 meta = (struct hostap_skb_tx_data *) skb->cb;
@@ -2694,7 +2694,7 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
2694 meta->iface->type == HOSTAP_INTERFACE_STA) 2694 meta->iface->type == HOSTAP_INTERFACE_STA)
2695 goto out; 2695 goto out;
2696 2696
2697 hdr = (struct ieee80211_hdr *) skb->data; 2697 hdr = (struct ieee80211_hdr_4addr *) skb->data;
2698 2698
2699 if (hdr->addr1[0] & 0x01) { 2699 if (hdr->addr1[0] & 0x01) {
2700 /* broadcast/multicast frame - no AP related processing */ 2700 /* broadcast/multicast frame - no AP related processing */
@@ -2821,10 +2821,10 @@ void hostap_handle_sta_release(void *ptr)
2821void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) 2821void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
2822{ 2822{
2823 struct sta_info *sta; 2823 struct sta_info *sta;
2824 struct ieee80211_hdr *hdr; 2824 struct ieee80211_hdr_4addr *hdr;
2825 struct hostap_skb_tx_data *meta; 2825 struct hostap_skb_tx_data *meta;
2826 2826
2827 hdr = (struct ieee80211_hdr *) skb->data; 2827 hdr = (struct ieee80211_hdr_4addr *) skb->data;
2828 meta = (struct hostap_skb_tx_data *) skb->cb; 2828 meta = (struct hostap_skb_tx_data *) skb->cb;
2829 2829
2830 spin_lock(&local->ap->sta_table_lock); 2830 spin_lock(&local->ap->sta_table_lock);
@@ -2892,7 +2892,7 @@ static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta,
2892 2892
2893/* Called only as a tasklet (software IRQ). Called for each RX frame to update 2893/* Called only as a tasklet (software IRQ). Called for each RX frame to update
2894 * STA power saving state. pwrmgt is a flag from 802.11 frame_ctl field. */ 2894 * STA power saving state. pwrmgt is a flag from 802.11 frame_ctl field. */
2895int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr) 2895int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr_4addr *hdr)
2896{ 2896{
2897 struct sta_info *sta; 2897 struct sta_info *sta;
2898 u16 fc; 2898 u16 fc;
@@ -2925,12 +2925,12 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
2925 int ret; 2925 int ret;
2926 struct sta_info *sta; 2926 struct sta_info *sta;
2927 u16 fc, type, stype; 2927 u16 fc, type, stype;
2928 struct ieee80211_hdr *hdr; 2928 struct ieee80211_hdr_4addr *hdr;
2929 2929
2930 if (local->ap == NULL) 2930 if (local->ap == NULL)
2931 return AP_RX_CONTINUE; 2931 return AP_RX_CONTINUE;
2932 2932
2933 hdr = (struct ieee80211_hdr *) skb->data; 2933 hdr = (struct ieee80211_hdr_4addr *) skb->data;
2934 2934
2935 fc = le16_to_cpu(hdr->frame_ctl); 2935 fc = le16_to_cpu(hdr->frame_ctl);
2936 type = WLAN_FC_GET_TYPE(fc); 2936 type = WLAN_FC_GET_TYPE(fc);
@@ -3058,7 +3058,7 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
3058 3058
3059/* Called only as a tasklet (software IRQ) */ 3059/* Called only as a tasklet (software IRQ) */
3060int hostap_handle_sta_crypto(local_info_t *local, 3060int hostap_handle_sta_crypto(local_info_t *local,
3061 struct ieee80211_hdr *hdr, 3061 struct ieee80211_hdr_4addr *hdr,
3062 struct ieee80211_crypt_data **crypt, 3062 struct ieee80211_crypt_data **crypt,
3063 void **sta_ptr) 3063 void **sta_ptr)
3064{ 3064{
@@ -3160,7 +3160,7 @@ int hostap_add_sta(struct ap_data *ap, u8 *sta_addr)
3160 3160
3161/* Called only as a tasklet (software IRQ) */ 3161/* Called only as a tasklet (software IRQ) */
3162int hostap_update_rx_stats(struct ap_data *ap, 3162int hostap_update_rx_stats(struct ap_data *ap,
3163 struct ieee80211_hdr *hdr, 3163 struct ieee80211_hdr_4addr *hdr,
3164 struct hostap_80211_rx_status *rx_stats) 3164 struct hostap_80211_rx_status *rx_stats)
3165{ 3165{
3166 struct sta_info *sta; 3166 struct sta_info *sta;
@@ -3174,7 +3174,7 @@ int hostap_update_rx_stats(struct ap_data *ap,
3174 sta->last_rx_silence = rx_stats->noise; 3174 sta->last_rx_silence = rx_stats->noise;
3175 sta->last_rx_signal = rx_stats->signal; 3175 sta->last_rx_signal = rx_stats->signal;
3176 sta->last_rx_rate = rx_stats->rate; 3176 sta->last_rx_rate = rx_stats->rate;
3177 sta->last_rx_updated = 7; 3177 sta->last_rx_updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
3178 if (rx_stats->rate == 10) 3178 if (rx_stats->rate == 10)
3179 sta->rx_count[0]++; 3179 sta->rx_count[0]++;
3180 else if (rx_stats->rate == 20) 3180 else if (rx_stats->rate == 20)
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
index 816a52bcea8f..6d00df69c2e3 100644
--- a/drivers/net/wireless/hostap/hostap_ap.h
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -233,7 +233,7 @@ struct hostap_tx_data {
233ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx); 233ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx);
234void hostap_handle_sta_release(void *ptr); 234void hostap_handle_sta_release(void *ptr);
235void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb); 235void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb);
236int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr); 236int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr_4addr *hdr);
237typedef enum { 237typedef enum {
238 AP_RX_CONTINUE, AP_RX_DROP, AP_RX_EXIT, AP_RX_CONTINUE_NOT_AUTHORIZED 238 AP_RX_CONTINUE, AP_RX_DROP, AP_RX_EXIT, AP_RX_CONTINUE_NOT_AUTHORIZED
239} ap_rx_ret; 239} ap_rx_ret;
@@ -241,13 +241,13 @@ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
241 struct sk_buff *skb, 241 struct sk_buff *skb,
242 struct hostap_80211_rx_status *rx_stats, 242 struct hostap_80211_rx_status *rx_stats,
243 int wds); 243 int wds);
244int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr *hdr, 244int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
245 struct ieee80211_crypt_data **crypt, 245 struct ieee80211_crypt_data **crypt,
246 void **sta_ptr); 246 void **sta_ptr);
247int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr); 247int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr);
248int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr); 248int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr);
249int hostap_add_sta(struct ap_data *ap, u8 *sta_addr); 249int hostap_add_sta(struct ap_data *ap, u8 *sta_addr);
250int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr, 250int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr_4addr *hdr,
251 struct hostap_80211_rx_status *rx_stats); 251 struct hostap_80211_rx_status *rx_stats);
252void hostap_update_rates(local_info_t *local); 252void hostap_update_rates(local_info_t *local);
253void hostap_add_wds_links(local_info_t *local); 253void hostap_add_wds_links(local_info_t *local);
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index faa83badf0a1..2643976a6677 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -492,42 +492,10 @@ static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
492} 492}
493 493
494 494
495static int prism2_pccard_dev_open(local_info_t *local)
496{
497 struct hostap_cs_priv *hw_priv = local->hw_priv;
498 hw_priv->link->open++;
499 return 0;
500}
501
502
503static int prism2_pccard_dev_close(local_info_t *local)
504{
505 struct hostap_cs_priv *hw_priv;
506
507 if (local == NULL || local->hw_priv == NULL)
508 return 1;
509 hw_priv = local->hw_priv;
510 if (hw_priv->link == NULL)
511 return 1;
512
513 if (!hw_priv->link->open) {
514 printk(KERN_WARNING "%s: prism2_pccard_dev_close(): "
515 "link not open?!\n", local->dev->name);
516 return 1;
517 }
518
519 hw_priv->link->open--;
520
521 return 0;
522}
523
524
525static struct prism2_helper_functions prism2_pccard_funcs = 495static struct prism2_helper_functions prism2_pccard_funcs =
526{ 496{
527 .card_present = prism2_pccard_card_present, 497 .card_present = prism2_pccard_card_present,
528 .cor_sreset = prism2_pccard_cor_sreset, 498 .cor_sreset = prism2_pccard_cor_sreset,
529 .dev_open = prism2_pccard_dev_open,
530 .dev_close = prism2_pccard_dev_close,
531 .genesis_reset = prism2_pccard_genesis_reset, 499 .genesis_reset = prism2_pccard_genesis_reset,
532 .hw_type = HOSTAP_HW_PCCARD, 500 .hw_type = HOSTAP_HW_PCCARD,
533}; 501};
@@ -597,13 +565,14 @@ static void prism2_detach(dev_link_t *link)
597 *linkp = link->next; 565 *linkp = link->next;
598 /* release net devices */ 566 /* release net devices */
599 if (link->priv) { 567 if (link->priv) {
568 struct hostap_cs_priv *hw_priv;
600 struct net_device *dev; 569 struct net_device *dev;
601 struct hostap_interface *iface; 570 struct hostap_interface *iface;
602 dev = link->priv; 571 dev = link->priv;
603 iface = netdev_priv(dev); 572 iface = netdev_priv(dev);
604 kfree(iface->local->hw_priv); 573 hw_priv = iface->local->hw_priv;
605 iface->local->hw_priv = NULL;
606 prism2_free_local_data(dev); 574 prism2_free_local_data(dev);
575 kfree(hw_priv);
607 } 576 }
608 kfree(link); 577 kfree(link);
609} 578}
@@ -883,6 +852,13 @@ static int prism2_event(event_t event, int priority,
883{ 852{
884 dev_link_t *link = args->client_data; 853 dev_link_t *link = args->client_data;
885 struct net_device *dev = (struct net_device *) link->priv; 854 struct net_device *dev = (struct net_device *) link->priv;
855 int dev_open = 0;
856
857 if (link->state & DEV_CONFIG) {
858 struct hostap_interface *iface = netdev_priv(dev);
859 if (iface && iface->local)
860 dev_open = iface->local->num_dev_open > 0;
861 }
886 862
887 switch (event) { 863 switch (event) {
888 case CS_EVENT_CARD_INSERTION: 864 case CS_EVENT_CARD_INSERTION:
@@ -911,7 +887,7 @@ static int prism2_event(event_t event, int priority,
911 case CS_EVENT_RESET_PHYSICAL: 887 case CS_EVENT_RESET_PHYSICAL:
912 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_RESET_PHYSICAL\n", dev_info); 888 PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_RESET_PHYSICAL\n", dev_info);
913 if (link->state & DEV_CONFIG) { 889 if (link->state & DEV_CONFIG) {
914 if (link->open) { 890 if (dev_open) {
915 netif_stop_queue(dev); 891 netif_stop_queue(dev);
916 netif_device_detach(dev); 892 netif_device_detach(dev);
917 } 893 }
@@ -931,8 +907,8 @@ static int prism2_event(event_t event, int priority,
931 pcmcia_request_configuration(link->handle, 907 pcmcia_request_configuration(link->handle,
932 &link->conf); 908 &link->conf);
933 prism2_hw_shutdown(dev, 1); 909 prism2_hw_shutdown(dev, 1);
934 prism2_hw_config(dev, link->open ? 0 : 1); 910 prism2_hw_config(dev, dev_open ? 0 : 1);
935 if (link->open) { 911 if (dev_open) {
936 netif_device_attach(dev); 912 netif_device_attach(dev);
937 netif_start_queue(dev); 913 netif_start_queue(dev);
938 } 914 }
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index e533a663deda..59fc15572395 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3322,6 +3322,18 @@ static void prism2_free_local_data(struct net_device *dev)
3322 iface = netdev_priv(dev); 3322 iface = netdev_priv(dev);
3323 local = iface->local; 3323 local = iface->local;
3324 3324
3325 /* Unregister all netdevs before freeing local data. */
3326 list_for_each_safe(ptr, n, &local->hostap_interfaces) {
3327 iface = list_entry(ptr, struct hostap_interface, list);
3328 if (iface->type == HOSTAP_INTERFACE_MASTER) {
3329 /* special handling for this interface below */
3330 continue;
3331 }
3332 hostap_remove_interface(iface->dev, 0, 1);
3333 }
3334
3335 unregister_netdev(local->dev);
3336
3325 flush_scheduled_work(); 3337 flush_scheduled_work();
3326 3338
3327 if (timer_pending(&local->crypt_deinit_timer)) 3339 if (timer_pending(&local->crypt_deinit_timer))
@@ -3382,15 +3394,6 @@ static void prism2_free_local_data(struct net_device *dev)
3382 prism2_download_free_data(local->dl_sec); 3394 prism2_download_free_data(local->dl_sec);
3383#endif /* PRISM2_DOWNLOAD_SUPPORT */ 3395#endif /* PRISM2_DOWNLOAD_SUPPORT */
3384 3396
3385 list_for_each_safe(ptr, n, &local->hostap_interfaces) {
3386 iface = list_entry(ptr, struct hostap_interface, list);
3387 if (iface->type == HOSTAP_INTERFACE_MASTER) {
3388 /* special handling for this interface below */
3389 continue;
3390 }
3391 hostap_remove_interface(iface->dev, 0, 1);
3392 }
3393
3394 prism2_clear_set_tim_queue(local); 3397 prism2_clear_set_tim_queue(local);
3395 3398
3396 list_for_each_safe(ptr, n, &local->bss_list) { 3399 list_for_each_safe(ptr, n, &local->bss_list) {
@@ -3403,7 +3406,6 @@ static void prism2_free_local_data(struct net_device *dev)
3403 kfree(local->last_scan_results); 3406 kfree(local->last_scan_results);
3404 kfree(local->generic_elem); 3407 kfree(local->generic_elem);
3405 3408
3406 unregister_netdev(local->dev);
3407 free_netdev(local->dev); 3409 free_netdev(local->dev);
3408} 3410}
3409 3411
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index e720369a3515..2617d70bcda9 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -50,7 +50,8 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
50#endif /* in_atomic */ 50#endif /* in_atomic */
51 51
52 if (update && prism2_update_comms_qual(dev) == 0) 52 if (update && prism2_update_comms_qual(dev) == 0)
53 wstats->qual.updated = 7; 53 wstats->qual.updated = IW_QUAL_ALL_UPDATED |
54 IW_QUAL_DBM;
54 55
55 wstats->qual.qual = local->comms_qual; 56 wstats->qual.qual = local->comms_qual;
56 wstats->qual.level = local->avg_signal; 57 wstats->qual.level = local->avg_signal;
@@ -59,7 +60,7 @@ static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
59 wstats->qual.qual = 0; 60 wstats->qual.qual = 0;
60 wstats->qual.level = 0; 61 wstats->qual.level = 0;
61 wstats->qual.noise = 0; 62 wstats->qual.noise = 0;
62 wstats->qual.updated = 0; 63 wstats->qual.updated = IW_QUAL_ALL_INVALID;
63 } 64 }
64 65
65 return wstats; 66 return wstats;
@@ -551,7 +552,6 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
551 552
552 kfree(addr); 553 kfree(addr);
553 kfree(qual); 554 kfree(qual);
554
555 return 0; 555 return 0;
556} 556}
557 557
@@ -1827,13 +1827,6 @@ static char * __prism2_translate_scan(local_info_t *local,
1827 iwe.cmd = SIOCGIWAP; 1827 iwe.cmd = SIOCGIWAP;
1828 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 1828 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
1829 memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN); 1829 memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
1830 /* FIX:
1831 * I do not know how this is possible, but iwe_stream_add_event
1832 * seems to re-order memcpy execution so that len is set only
1833 * after copying.. Pre-setting len here "fixes" this, but real
1834 * problems should be solved (after which these iwe.len
1835 * settings could be removed from this function). */
1836 iwe.len = IW_EV_ADDR_LEN;
1837 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 1830 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1838 IW_EV_ADDR_LEN); 1831 IW_EV_ADDR_LEN);
1839 1832
@@ -1843,7 +1836,6 @@ static char * __prism2_translate_scan(local_info_t *local,
1843 iwe.cmd = SIOCGIWESSID; 1836 iwe.cmd = SIOCGIWESSID;
1844 iwe.u.data.length = ssid_len; 1837 iwe.u.data.length = ssid_len;
1845 iwe.u.data.flags = 1; 1838 iwe.u.data.flags = 1;
1846 iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
1847 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ssid); 1839 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ssid);
1848 1840
1849 memset(&iwe, 0, sizeof(iwe)); 1841 memset(&iwe, 0, sizeof(iwe));
@@ -1859,7 +1851,6 @@ static char * __prism2_translate_scan(local_info_t *local,
1859 iwe.u.mode = IW_MODE_MASTER; 1851 iwe.u.mode = IW_MODE_MASTER;
1860 else 1852 else
1861 iwe.u.mode = IW_MODE_ADHOC; 1853 iwe.u.mode = IW_MODE_ADHOC;
1862 iwe.len = IW_EV_UINT_LEN;
1863 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 1854 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1864 IW_EV_UINT_LEN); 1855 IW_EV_UINT_LEN);
1865 } 1856 }
@@ -1877,7 +1868,6 @@ static char * __prism2_translate_scan(local_info_t *local,
1877 if (chan > 0) { 1868 if (chan > 0) {
1878 iwe.u.freq.m = freq_list[le16_to_cpu(chan - 1)] * 100000; 1869 iwe.u.freq.m = freq_list[le16_to_cpu(chan - 1)] * 100000;
1879 iwe.u.freq.e = 1; 1870 iwe.u.freq.e = 1;
1880 iwe.len = IW_EV_FREQ_LEN;
1881 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 1871 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1882 IW_EV_FREQ_LEN); 1872 IW_EV_FREQ_LEN);
1883 } 1873 }
@@ -1894,7 +1884,10 @@ static char * __prism2_translate_scan(local_info_t *local,
1894 iwe.u.qual.noise = 1884 iwe.u.qual.noise =
1895 HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl)); 1885 HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl));
1896 } 1886 }
1897 iwe.len = IW_EV_QUAL_LEN; 1887 iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED
1888 | IW_QUAL_NOISE_UPDATED
1889 | IW_QUAL_QUAL_INVALID
1890 | IW_QUAL_DBM;
1898 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe, 1891 current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
1899 IW_EV_QUAL_LEN); 1892 IW_EV_QUAL_LEN);
1900 } 1893 }
@@ -1906,7 +1899,6 @@ static char * __prism2_translate_scan(local_info_t *local,
1906 else 1899 else
1907 iwe.u.data.flags = IW_ENCODE_DISABLED; 1900 iwe.u.data.flags = IW_ENCODE_DISABLED;
1908 iwe.u.data.length = 0; 1901 iwe.u.data.length = 0;
1909 iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
1910 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ""); 1902 current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, "");
1911 1903
1912 /* TODO: add SuppRates into BSS table */ 1904 /* TODO: add SuppRates into BSS table */
@@ -1930,7 +1922,7 @@ static char * __prism2_translate_scan(local_info_t *local,
1930 } 1922 }
1931 1923
1932 /* TODO: add BeaconInt,resp_rate,atim into BSS table */ 1924 /* TODO: add BeaconInt,resp_rate,atim into BSS table */
1933 buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_KERNEL); 1925 buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_ATOMIC);
1934 if (buf && scan) { 1926 if (buf && scan) {
1935 memset(&iwe, 0, sizeof(iwe)); 1927 memset(&iwe, 0, sizeof(iwe));
1936 iwe.cmd = IWEVCUSTOM; 1928 iwe.cmd = IWEVCUSTOM;
@@ -3088,9 +3080,7 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
3088 ret = local->func->download(local, param); 3080 ret = local->func->download(local, param);
3089 3081
3090 out: 3082 out:
3091 if (param != NULL) 3083 kfree(param);
3092 kfree(param);
3093
3094 return ret; 3084 return ret;
3095} 3085}
3096#endif /* PRISM2_DOWNLOAD_SUPPORT */ 3086#endif /* PRISM2_DOWNLOAD_SUPPORT */
@@ -3897,9 +3887,7 @@ static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
3897 } 3887 }
3898 3888
3899 out: 3889 out:
3900 if (param != NULL) 3890 kfree(param);
3901 kfree(param);
3902
3903 return ret; 3891 return ret;
3904} 3892}
3905 3893
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 025f8cdb5566..da0c80fb941c 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -59,11 +59,13 @@ static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
59static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v) 59static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
60{ 60{
61 struct hostap_interface *iface; 61 struct hostap_interface *iface;
62 struct hostap_pci_priv *hw_priv;
62 local_info_t *local; 63 local_info_t *local;
63 unsigned long flags; 64 unsigned long flags;
64 65
65 iface = netdev_priv(dev); 66 iface = netdev_priv(dev);
66 local = iface->local; 67 local = iface->local;
68 hw_priv = local->hw_priv;
67 69
68 spin_lock_irqsave(&local->lock, flags); 70 spin_lock_irqsave(&local->lock, flags);
69 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v); 71 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
@@ -74,12 +76,14 @@ static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
74static inline u8 hfa384x_inb_debug(struct net_device *dev, int a) 76static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
75{ 77{
76 struct hostap_interface *iface; 78 struct hostap_interface *iface;
79 struct hostap_pci_priv *hw_priv;
77 local_info_t *local; 80 local_info_t *local;
78 unsigned long flags; 81 unsigned long flags;
79 u8 v; 82 u8 v;
80 83
81 iface = netdev_priv(dev); 84 iface = netdev_priv(dev);
82 local = iface->local; 85 local = iface->local;
86 hw_priv = local->hw_priv;
83 87
84 spin_lock_irqsave(&local->lock, flags); 88 spin_lock_irqsave(&local->lock, flags);
85 v = readb(hw_priv->mem_start + a); 89 v = readb(hw_priv->mem_start + a);
@@ -91,11 +95,13 @@ static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
91static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v) 95static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
92{ 96{
93 struct hostap_interface *iface; 97 struct hostap_interface *iface;
98 struct hostap_pci_priv *hw_priv;
94 local_info_t *local; 99 local_info_t *local;
95 unsigned long flags; 100 unsigned long flags;
96 101
97 iface = netdev_priv(dev); 102 iface = netdev_priv(dev);
98 local = iface->local; 103 local = iface->local;
104 hw_priv = local->hw_priv;
99 105
100 spin_lock_irqsave(&local->lock, flags); 106 spin_lock_irqsave(&local->lock, flags);
101 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v); 107 prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
@@ -106,12 +112,14 @@ static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
106static inline u16 hfa384x_inw_debug(struct net_device *dev, int a) 112static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
107{ 113{
108 struct hostap_interface *iface; 114 struct hostap_interface *iface;
115 struct hostap_pci_priv *hw_priv;
109 local_info_t *local; 116 local_info_t *local;
110 unsigned long flags; 117 unsigned long flags;
111 u16 v; 118 u16 v;
112 119
113 iface = netdev_priv(dev); 120 iface = netdev_priv(dev);
114 local = iface->local; 121 local = iface->local;
122 hw_priv = local->hw_priv;
115 123
116 spin_lock_irqsave(&local->lock, flags); 124 spin_lock_irqsave(&local->lock, flags);
117 v = readw(hw_priv->mem_start + a); 125 v = readw(hw_priv->mem_start + a);
@@ -277,8 +285,6 @@ static struct prism2_helper_functions prism2_pci_funcs =
277{ 285{
278 .card_present = NULL, 286 .card_present = NULL,
279 .cor_sreset = prism2_pci_cor_sreset, 287 .cor_sreset = prism2_pci_cor_sreset,
280 .dev_open = NULL,
281 .dev_close = NULL,
282 .genesis_reset = prism2_pci_genesis_reset, 288 .genesis_reset = prism2_pci_genesis_reset,
283 .hw_type = HOSTAP_HW_PCI, 289 .hw_type = HOSTAP_HW_PCI,
284}; 290};
@@ -352,8 +358,6 @@ static int prism2_pci_probe(struct pci_dev *pdev,
352 return hostap_hw_ready(dev); 358 return hostap_hw_ready(dev);
353 359
354 fail: 360 fail:
355 kfree(hw_priv);
356
357 if (irq_registered && dev) 361 if (irq_registered && dev)
358 free_irq(dev->irq, dev); 362 free_irq(dev->irq, dev);
359 363
@@ -364,10 +368,8 @@ static int prism2_pci_probe(struct pci_dev *pdev,
364 368
365 err_out_disable: 369 err_out_disable:
366 pci_disable_device(pdev); 370 pci_disable_device(pdev);
367 kfree(hw_priv);
368 if (local)
369 local->hw_priv = NULL;
370 prism2_free_local_data(dev); 371 prism2_free_local_data(dev);
372 kfree(hw_priv);
371 373
372 return -ENODEV; 374 return -ENODEV;
373} 375}
@@ -392,9 +394,8 @@ static void prism2_pci_remove(struct pci_dev *pdev)
392 free_irq(dev->irq, dev); 394 free_irq(dev->irq, dev);
393 395
394 mem_start = hw_priv->mem_start; 396 mem_start = hw_priv->mem_start;
395 kfree(hw_priv);
396 iface->local->hw_priv = NULL;
397 prism2_free_local_data(dev); 397 prism2_free_local_data(dev);
398 kfree(hw_priv);
398 399
399 iounmap(mem_start); 400 iounmap(mem_start);
400 401
@@ -441,7 +442,7 @@ static int prism2_pci_resume(struct pci_dev *pdev)
441MODULE_DEVICE_TABLE(pci, prism2_pci_id_table); 442MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
442 443
443static struct pci_driver prism2_pci_drv_id = { 444static struct pci_driver prism2_pci_drv_id = {
444 .name = "prism2_pci", 445 .name = "hostap_pci",
445 .id_table = prism2_pci_id_table, 446 .id_table = prism2_pci_id_table,
446 .probe = prism2_pci_probe, 447 .probe = prism2_pci_probe,
447 .remove = prism2_pci_remove, 448 .remove = prism2_pci_remove,
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 474ef83d813e..78d67b408b2f 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -328,8 +328,6 @@ static struct prism2_helper_functions prism2_plx_funcs =
328{ 328{
329 .card_present = NULL, 329 .card_present = NULL,
330 .cor_sreset = prism2_plx_cor_sreset, 330 .cor_sreset = prism2_plx_cor_sreset,
331 .dev_open = NULL,
332 .dev_close = NULL,
333 .genesis_reset = prism2_plx_genesis_reset, 331 .genesis_reset = prism2_plx_genesis_reset,
334 .hw_type = HOSTAP_HW_PLX, 332 .hw_type = HOSTAP_HW_PLX,
335}; 333};
@@ -570,10 +568,8 @@ static int prism2_plx_probe(struct pci_dev *pdev,
570 return hostap_hw_ready(dev); 568 return hostap_hw_ready(dev);
571 569
572 fail: 570 fail:
573 kfree(hw_priv);
574 if (local)
575 local->hw_priv = NULL;
576 prism2_free_local_data(dev); 571 prism2_free_local_data(dev);
572 kfree(hw_priv);
577 573
578 if (irq_registered && dev) 574 if (irq_registered && dev)
579 free_irq(dev->irq, dev); 575 free_irq(dev->irq, dev);
@@ -606,9 +602,8 @@ static void prism2_plx_remove(struct pci_dev *pdev)
606 if (dev->irq) 602 if (dev->irq)
607 free_irq(dev->irq, dev); 603 free_irq(dev->irq, dev);
608 604
609 kfree(iface->local->hw_priv);
610 iface->local->hw_priv = NULL;
611 prism2_free_local_data(dev); 605 prism2_free_local_data(dev);
606 kfree(hw_priv);
612 pci_disable_device(pdev); 607 pci_disable_device(pdev);
613} 608}
614 609
@@ -616,7 +611,7 @@ static void prism2_plx_remove(struct pci_dev *pdev)
616MODULE_DEVICE_TABLE(pci, prism2_plx_id_table); 611MODULE_DEVICE_TABLE(pci, prism2_plx_id_table);
617 612
618static struct pci_driver prism2_plx_drv_id = { 613static struct pci_driver prism2_plx_drv_id = {
619 .name = "prism2_plx", 614 .name = "hostap_plx",
620 .id_table = prism2_plx_id_table, 615 .id_table = prism2_plx_id_table,
621 .probe = prism2_plx_probe, 616 .probe = prism2_plx_probe,
622 .remove = prism2_plx_remove, 617 .remove = prism2_plx_remove,
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index cc061e1560d3..cfd801559492 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -552,8 +552,6 @@ struct prism2_helper_functions {
552 * (hostap_{cs,plx,pci}.c */ 552 * (hostap_{cs,plx,pci}.c */
553 int (*card_present)(local_info_t *local); 553 int (*card_present)(local_info_t *local);
554 void (*cor_sreset)(local_info_t *local); 554 void (*cor_sreset)(local_info_t *local);
555 int (*dev_open)(local_info_t *local);
556 int (*dev_close)(local_info_t *local);
557 void (*genesis_reset)(local_info_t *local, int hcr); 555 void (*genesis_reset)(local_info_t *local, int hcr);
558 556
559 /* the following functions are from hostap_hw.c, but they may have some 557 /* the following functions are from hostap_hw.c, but they may have some
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 2414e6493aa5..ad7f8cd76db9 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -800,8 +800,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
800 * doesn't seem to have as many firmware restart cycles... 800 * doesn't seem to have as many firmware restart cycles...
801 * 801 *
802 * As a test, we're sticking in a 1/100s delay here */ 802 * As a test, we're sticking in a 1/100s delay here */
803 set_current_state(TASK_UNINTERRUPTIBLE); 803 schedule_timeout_uninterruptible(msecs_to_jiffies(10));
804 schedule_timeout(HZ / 100);
805 804
806 return 0; 805 return 0;
807 806
@@ -1256,8 +1255,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
1256 IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n"); 1255 IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
1257 i = 5000; 1256 i = 5000;
1258 do { 1257 do {
1259 set_current_state(TASK_UNINTERRUPTIBLE); 1258 schedule_timeout_uninterruptible(msecs_to_jiffies(40));
1260 schedule_timeout(40 * HZ / 1000);
1261 /* Todo... wait for sync command ... */ 1259 /* Todo... wait for sync command ... */
1262 1260
1263 read_register(priv->net_dev, IPW_REG_INTA, &inta); 1261 read_register(priv->net_dev, IPW_REG_INTA, &inta);
@@ -1411,8 +1409,7 @@ static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
1411 (val2 & IPW2100_COMMAND_PHY_OFF)) 1409 (val2 & IPW2100_COMMAND_PHY_OFF))
1412 return 0; 1410 return 0;
1413 1411
1414 set_current_state(TASK_UNINTERRUPTIBLE); 1412 schedule_timeout_uninterruptible(HW_PHY_OFF_LOOP_DELAY);
1415 schedule_timeout(HW_PHY_OFF_LOOP_DELAY);
1416 } 1413 }
1417 1414
1418 return -EIO; 1415 return -EIO;
@@ -1466,7 +1463,7 @@ fail_up:
1466 1463
1467static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv) 1464static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
1468{ 1465{
1469#define HW_POWER_DOWN_DELAY (HZ / 10) 1466#define HW_POWER_DOWN_DELAY (msecs_to_jiffies(100))
1470 1467
1471 struct host_command cmd = { 1468 struct host_command cmd = {
1472 .host_command = HOST_PRE_POWER_DOWN, 1469 .host_command = HOST_PRE_POWER_DOWN,
@@ -1520,10 +1517,8 @@ static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
1520 printk(KERN_WARNING DRV_NAME ": " 1517 printk(KERN_WARNING DRV_NAME ": "
1521 "%s: Power down command failed: Error %d\n", 1518 "%s: Power down command failed: Error %d\n",
1522 priv->net_dev->name, err); 1519 priv->net_dev->name, err);
1523 else { 1520 else
1524 set_current_state(TASK_UNINTERRUPTIBLE); 1521 schedule_timeout_uninterruptible(HW_POWER_DOWN_DELAY);
1525 schedule_timeout(HW_POWER_DOWN_DELAY);
1526 }
1527 } 1522 }
1528 1523
1529 priv->status &= ~STATUS_ENABLED; 1524 priv->status &= ~STATUS_ENABLED;
@@ -2953,7 +2948,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
2953 int next = txq->next; 2948 int next = txq->next;
2954 int i = 0; 2949 int i = 0;
2955 struct ipw2100_data_header *ipw_hdr; 2950 struct ipw2100_data_header *ipw_hdr;
2956 struct ieee80211_hdr *hdr; 2951 struct ieee80211_hdr_3addr *hdr;
2957 2952
2958 while (!list_empty(&priv->tx_pend_list)) { 2953 while (!list_empty(&priv->tx_pend_list)) {
2959 /* if there isn't enough space in TBD queue, then 2954 /* if there isn't enough space in TBD queue, then
@@ -2989,7 +2984,7 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
2989 packet->index = txq->next; 2984 packet->index = txq->next;
2990 2985
2991 ipw_hdr = packet->info.d_struct.data; 2986 ipw_hdr = packet->info.d_struct.data;
2992 hdr = (struct ieee80211_hdr *)packet->info.d_struct.txb-> 2987 hdr = (struct ieee80211_hdr_3addr *)packet->info.d_struct.txb->
2993 fragments[0]->data; 2988 fragments[0]->data;
2994 2989
2995 if (priv->ieee->iw_mode == IW_MODE_INFRA) { 2990 if (priv->ieee->iw_mode == IW_MODE_INFRA) {
@@ -3274,7 +3269,8 @@ static irqreturn_t ipw2100_interrupt(int irq, void *data,
3274 return IRQ_NONE; 3269 return IRQ_NONE;
3275} 3270}
3276 3271
3277static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev) 3272static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev,
3273 int pri)
3278{ 3274{
3279 struct ipw2100_priv *priv = ieee80211_priv(dev); 3275 struct ipw2100_priv *priv = ieee80211_priv(dev);
3280 struct list_head *element; 3276 struct list_head *element;
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
index 2a3cdbd50168..c9e99ce15d66 100644
--- a/drivers/net/wireless/ipw2100.h
+++ b/drivers/net/wireless/ipw2100.h
@@ -808,7 +808,7 @@ struct ipw2100_priv {
808struct ipw2100_rx { 808struct ipw2100_rx {
809 union { 809 union {
810 unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH]; 810 unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH];
811 struct ieee80211_hdr header; 811 struct ieee80211_hdr_4addr header;
812 u32 status; 812 u32 status;
813 struct ipw2100_notification notification; 813 struct ipw2100_notification notification;
814 struct ipw2100_cmd_header command; 814 struct ipw2100_cmd_header command;
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index b7f275c00de3..3db0c32afe82 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4030,6 +4030,10 @@ static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
4030 int i; 4030 int i;
4031 4031
4032 rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL); 4032 rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL);
4033 if (unlikely(!rxq)) {
4034 IPW_ERROR("memory allocation failed\n");
4035 return NULL;
4036 }
4033 memset(rxq, 0, sizeof(*rxq)); 4037 memset(rxq, 0, sizeof(*rxq));
4034 spin_lock_init(&rxq->lock); 4038 spin_lock_init(&rxq->lock);
4035 INIT_LIST_HEAD(&rxq->rx_free); 4039 INIT_LIST_HEAD(&rxq->rx_free);
@@ -4904,7 +4908,7 @@ static void ipw_rx(struct ipw_priv *priv)
4904{ 4908{
4905 struct ipw_rx_mem_buffer *rxb; 4909 struct ipw_rx_mem_buffer *rxb;
4906 struct ipw_rx_packet *pkt; 4910 struct ipw_rx_packet *pkt;
4907 struct ieee80211_hdr *header; 4911 struct ieee80211_hdr_4addr *header;
4908 u32 r, w, i; 4912 u32 r, w, i;
4909 u8 network_packet; 4913 u8 network_packet;
4910 4914
@@ -4967,8 +4971,9 @@ static void ipw_rx(struct ipw_priv *priv)
4967#endif 4971#endif
4968 4972
4969 header = 4973 header =
4970 (struct ieee80211_hdr *)(rxb->skb->data + 4974 (struct ieee80211_hdr_4addr *)(rxb->skb->
4971 IPW_RX_FRAME_SIZE); 4975 data +
4976 IPW_RX_FRAME_SIZE);
4972 /* TODO: Check Ad-Hoc dest/source and make sure 4977 /* TODO: Check Ad-Hoc dest/source and make sure
4973 * that we are actually parsing these packets 4978 * that we are actually parsing these packets
4974 * correctly -- we should probably use the 4979 * correctly -- we should probably use the
@@ -5317,8 +5322,6 @@ static int ipw_wx_set_freq(struct net_device *dev,
5317 5322
5318 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 5323 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
5319 return ipw_set_channel(priv, (u8) fwrq->m); 5324 return ipw_set_channel(priv, (u8) fwrq->m);
5320
5321 return 0;
5322} 5325}
5323 5326
5324static int ipw_wx_get_freq(struct net_device *dev, 5327static int ipw_wx_get_freq(struct net_device *dev,
@@ -6010,12 +6013,12 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
6010 } 6013 }
6011 6014
6012 if (priv->adapter == IPW_2915ABG) { 6015 if (priv->adapter == IPW_2915ABG) {
6013 priv->ieee->abg_ture = 1; 6016 priv->ieee->abg_true = 1;
6014 if (mode & IEEE_A) { 6017 if (mode & IEEE_A) {
6015 band |= IEEE80211_52GHZ_BAND; 6018 band |= IEEE80211_52GHZ_BAND;
6016 modulation |= IEEE80211_OFDM_MODULATION; 6019 modulation |= IEEE80211_OFDM_MODULATION;
6017 } else 6020 } else
6018 priv->ieee->abg_ture = 0; 6021 priv->ieee->abg_true = 0;
6019 } else { 6022 } else {
6020 if (mode & IEEE_A) { 6023 if (mode & IEEE_A) {
6021 IPW_WARNING("Attempt to set 2200BG into " 6024 IPW_WARNING("Attempt to set 2200BG into "
@@ -6023,20 +6026,20 @@ static int ipw_wx_set_wireless_mode(struct net_device *dev,
6023 return -EINVAL; 6026 return -EINVAL;
6024 } 6027 }
6025 6028
6026 priv->ieee->abg_ture = 0; 6029 priv->ieee->abg_true = 0;
6027 } 6030 }
6028 6031
6029 if (mode & IEEE_B) { 6032 if (mode & IEEE_B) {
6030 band |= IEEE80211_24GHZ_BAND; 6033 band |= IEEE80211_24GHZ_BAND;
6031 modulation |= IEEE80211_CCK_MODULATION; 6034 modulation |= IEEE80211_CCK_MODULATION;
6032 } else 6035 } else
6033 priv->ieee->abg_ture = 0; 6036 priv->ieee->abg_true = 0;
6034 6037
6035 if (mode & IEEE_G) { 6038 if (mode & IEEE_G) {
6036 band |= IEEE80211_24GHZ_BAND; 6039 band |= IEEE80211_24GHZ_BAND;
6037 modulation |= IEEE80211_OFDM_MODULATION; 6040 modulation |= IEEE80211_OFDM_MODULATION;
6038 } else 6041 } else
6039 priv->ieee->abg_ture = 0; 6042 priv->ieee->abg_true = 0;
6040 6043
6041 priv->ieee->mode = mode; 6044 priv->ieee->mode = mode;
6042 priv->ieee->freq_band = band; 6045 priv->ieee->freq_band = band;
@@ -6325,7 +6328,7 @@ we need to heavily modify the ieee80211_skb_to_txb.
6325 6328
6326static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb) 6329static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6327{ 6330{
6328 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) 6331 struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)
6329 txb->fragments[0]->data; 6332 txb->fragments[0]->data;
6330 int i = 0; 6333 int i = 0;
6331 struct tfd_frame *tfd; 6334 struct tfd_frame *tfd;
@@ -6448,7 +6451,7 @@ static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6448} 6451}
6449 6452
6450static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb, 6453static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
6451 struct net_device *dev) 6454 struct net_device *dev, int pri)
6452{ 6455{
6453 struct ipw_priv *priv = ieee80211_priv(dev); 6456 struct ipw_priv *priv = ieee80211_priv(dev);
6454 unsigned long flags; 6457 unsigned long flags;
@@ -7108,7 +7111,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7108 printk(KERN_INFO DRV_NAME 7111 printk(KERN_INFO DRV_NAME
7109 ": Detected Intel PRO/Wireless 2915ABG Network " 7112 ": Detected Intel PRO/Wireless 2915ABG Network "
7110 "Connection\n"); 7113 "Connection\n");
7111 priv->ieee->abg_ture = 1; 7114 priv->ieee->abg_true = 1;
7112 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND; 7115 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
7113 modulation = IEEE80211_OFDM_MODULATION | 7116 modulation = IEEE80211_OFDM_MODULATION |
7114 IEEE80211_CCK_MODULATION; 7117 IEEE80211_CCK_MODULATION;
@@ -7124,7 +7127,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7124 ": Detected Intel PRO/Wireless 2200BG Network " 7127 ": Detected Intel PRO/Wireless 2200BG Network "
7125 "Connection\n"); 7128 "Connection\n");
7126 7129
7127 priv->ieee->abg_ture = 0; 7130 priv->ieee->abg_true = 0;
7128 band = IEEE80211_24GHZ_BAND; 7131 band = IEEE80211_24GHZ_BAND;
7129 modulation = IEEE80211_OFDM_MODULATION | 7132 modulation = IEEE80211_OFDM_MODULATION |
7130 IEEE80211_CCK_MODULATION; 7133 IEEE80211_CCK_MODULATION;
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index 5b00882133f9..e9cf32bf3e31 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -1654,12 +1654,12 @@ static const long ipw_frequencies[] = {
1654 1654
1655#define IPW_MAX_CONFIG_RETRIES 10 1655#define IPW_MAX_CONFIG_RETRIES 10
1656 1656
1657static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr) 1657static inline u32 frame_hdr_len(struct ieee80211_hdr_4addr *hdr)
1658{ 1658{
1659 u32 retval; 1659 u32 retval;
1660 u16 fc; 1660 u16 fc;
1661 1661
1662 retval = sizeof(struct ieee80211_hdr); 1662 retval = sizeof(struct ieee80211_hdr_3addr);
1663 fc = le16_to_cpu(hdr->frame_ctl); 1663 fc = le16_to_cpu(hdr->frame_ctl);
1664 1664
1665 /* 1665 /*
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/net/wireless/netwave_cs.c
index ca6c03c89926..92793b958e32 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/net/wireless/netwave_cs.c
@@ -57,9 +57,7 @@
57#include <linux/bitops.h> 57#include <linux/bitops.h>
58#ifdef CONFIG_NET_RADIO 58#ifdef CONFIG_NET_RADIO
59#include <linux/wireless.h> 59#include <linux/wireless.h>
60#if WIRELESS_EXT > 12
61#include <net/iw_handler.h> 60#include <net/iw_handler.h>
62#endif /* WIRELESS_EXT > 12 */
63#endif 61#endif
64 62
65#include <pcmcia/cs_types.h> 63#include <pcmcia/cs_types.h>
@@ -225,10 +223,7 @@ static void update_stats(struct net_device *dev);
225static struct net_device_stats *netwave_get_stats(struct net_device *dev); 223static struct net_device_stats *netwave_get_stats(struct net_device *dev);
226 224
227/* Wireless extensions */ 225/* Wireless extensions */
228#ifdef WIRELESS_EXT
229static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev); 226static struct iw_statistics* netwave_get_wireless_stats(struct net_device *dev);
230#endif
231static int netwave_ioctl(struct net_device *, struct ifreq *, int);
232 227
233static void set_multicast_list(struct net_device *dev); 228static void set_multicast_list(struct net_device *dev);
234 229
@@ -260,26 +255,7 @@ static dev_link_t *dev_list;
260 because they generally can't be allocated dynamically. 255 because they generally can't be allocated dynamically.
261*/ 256*/
262 257
263#if WIRELESS_EXT <= 12
264/* Wireless extensions backward compatibility */
265
266/* Part of iw_handler prototype we need */
267struct iw_request_info
268{
269 __u16 cmd; /* Wireless Extension command */
270 __u16 flags; /* More to come ;-) */
271};
272
273/* Wireless Extension Backward compatibility - Jean II
274 * If the new wireless device private ioctl range is not defined,
275 * default to standard device private ioctl range */
276#ifndef SIOCIWFIRSTPRIV
277#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
278#endif /* SIOCIWFIRSTPRIV */
279
280#else /* WIRELESS_EXT <= 12 */
281static const struct iw_handler_def netwave_handler_def; 258static const struct iw_handler_def netwave_handler_def;
282#endif /* WIRELESS_EXT <= 12 */
283 259
284#define SIOCGIPSNAP SIOCIWFIRSTPRIV + 1 /* Site Survey Snapshot */ 260#define SIOCGIPSNAP SIOCIWFIRSTPRIV + 1 /* Site Survey Snapshot */
285 261
@@ -319,9 +295,7 @@ typedef struct netwave_private {
319 struct timer_list watchdog; /* To avoid blocking state */ 295 struct timer_list watchdog; /* To avoid blocking state */
320 struct site_survey nss; 296 struct site_survey nss;
321 struct net_device_stats stats; 297 struct net_device_stats stats;
322#ifdef WIRELESS_EXT
323 struct iw_statistics iw_stats; /* Wireless stats */ 298 struct iw_statistics iw_stats; /* Wireless stats */
324#endif
325} netwave_private; 299} netwave_private;
326 300
327#ifdef NETWAVE_STATS 301#ifdef NETWAVE_STATS
@@ -353,7 +327,6 @@ static inline void wait_WOC(unsigned int iobase)
353 while ((inb(iobase + NETWAVE_REG_ASR) & 0x8) != 0x8) ; 327 while ((inb(iobase + NETWAVE_REG_ASR) & 0x8) != 0x8) ;
354} 328}
355 329
356#ifdef WIRELESS_EXT
357static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase, 330static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
358 kio_addr_t iobase) { 331 kio_addr_t iobase) {
359 u_short resultBuffer; 332 u_short resultBuffer;
@@ -376,9 +349,7 @@ static void netwave_snapshot(netwave_private *priv, u_char __iomem *ramBase,
376 sizeof(struct site_survey)); 349 sizeof(struct site_survey));
377 } 350 }
378} 351}
379#endif
380 352
381#ifdef WIRELESS_EXT
382/* 353/*
383 * Function netwave_get_wireless_stats (dev) 354 * Function netwave_get_wireless_stats (dev)
384 * 355 *
@@ -411,7 +382,6 @@ static struct iw_statistics *netwave_get_wireless_stats(struct net_device *dev)
411 382
412 return &priv->iw_stats; 383 return &priv->iw_stats;
413} 384}
414#endif
415 385
416/* 386/*
417 * Function netwave_attach (void) 387 * Function netwave_attach (void)
@@ -471,13 +441,7 @@ static dev_link_t *netwave_attach(void)
471 dev->get_stats = &netwave_get_stats; 441 dev->get_stats = &netwave_get_stats;
472 dev->set_multicast_list = &set_multicast_list; 442 dev->set_multicast_list = &set_multicast_list;
473 /* wireless extensions */ 443 /* wireless extensions */
474#if WIRELESS_EXT <= 16
475 dev->get_wireless_stats = &netwave_get_wireless_stats;
476#endif /* WIRELESS_EXT <= 16 */
477#if WIRELESS_EXT > 12
478 dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def; 444 dev->wireless_handlers = (struct iw_handler_def *)&netwave_handler_def;
479#endif /* WIRELESS_EXT > 12 */
480 dev->do_ioctl = &netwave_ioctl;
481 445
482 dev->tx_timeout = &netwave_watchdog; 446 dev->tx_timeout = &netwave_watchdog;
483 dev->watchdog_timeo = TX_TIMEOUT; 447 dev->watchdog_timeo = TX_TIMEOUT;
@@ -576,13 +540,8 @@ static int netwave_set_nwid(struct net_device *dev,
576 /* Disable interrupts & save flags */ 540 /* Disable interrupts & save flags */
577 spin_lock_irqsave(&priv->spinlock, flags); 541 spin_lock_irqsave(&priv->spinlock, flags);
578 542
579#if WIRELESS_EXT > 8
580 if(!wrqu->nwid.disabled) { 543 if(!wrqu->nwid.disabled) {
581 domain = wrqu->nwid.value; 544 domain = wrqu->nwid.value;
582#else /* WIRELESS_EXT > 8 */
583 if(wrqu->nwid.on) {
584 domain = wrqu->nwid.nwid;
585#endif /* WIRELESS_EXT > 8 */
586 printk( KERN_DEBUG "Setting domain to 0x%x%02x\n", 545 printk( KERN_DEBUG "Setting domain to 0x%x%02x\n",
587 (domain >> 8) & 0x01, domain & 0xff); 546 (domain >> 8) & 0x01, domain & 0xff);
588 wait_WOC(iobase); 547 wait_WOC(iobase);
@@ -606,15 +565,9 @@ static int netwave_get_nwid(struct net_device *dev,
606 union iwreq_data *wrqu, 565 union iwreq_data *wrqu,
607 char *extra) 566 char *extra)
608{ 567{
609#if WIRELESS_EXT > 8
610 wrqu->nwid.value = domain; 568 wrqu->nwid.value = domain;
611 wrqu->nwid.disabled = 0; 569 wrqu->nwid.disabled = 0;
612 wrqu->nwid.fixed = 1; 570 wrqu->nwid.fixed = 1;
613#else /* WIRELESS_EXT > 8 */
614 wrqu->nwid.nwid = domain;
615 wrqu->nwid.on = 1;
616#endif /* WIRELESS_EXT > 8 */
617
618 return 0; 571 return 0;
619} 572}
620 573
@@ -657,17 +610,11 @@ static int netwave_get_scramble(struct net_device *dev,
657{ 610{
658 key[1] = scramble_key & 0xff; 611 key[1] = scramble_key & 0xff;
659 key[0] = (scramble_key>>8) & 0xff; 612 key[0] = (scramble_key>>8) & 0xff;
660#if WIRELESS_EXT > 8
661 wrqu->encoding.flags = IW_ENCODE_ENABLED; 613 wrqu->encoding.flags = IW_ENCODE_ENABLED;
662 wrqu->encoding.length = 2; 614 wrqu->encoding.length = 2;
663#else /* WIRELESS_EXT > 8 */
664 wrqu->encoding.method = 1;
665#endif /* WIRELESS_EXT > 8 */
666
667 return 0; 615 return 0;
668} 616}
669 617
670#if WIRELESS_EXT > 8
671/* 618/*
672 * Wireless Handler : get mode 619 * Wireless Handler : get mode
673 */ 620 */
@@ -683,7 +630,6 @@ static int netwave_get_mode(struct net_device *dev,
683 630
684 return 0; 631 return 0;
685} 632}
686#endif /* WIRELESS_EXT > 8 */
687 633
688/* 634/*
689 * Wireless Handler : get range info 635 * Wireless Handler : get range info
@@ -702,11 +648,9 @@ static int netwave_get_range(struct net_device *dev,
702 /* Set all the info we don't care or don't know about to zero */ 648 /* Set all the info we don't care or don't know about to zero */
703 memset(range, 0, sizeof(struct iw_range)); 649 memset(range, 0, sizeof(struct iw_range));
704 650
705#if WIRELESS_EXT > 10
706 /* Set the Wireless Extension versions */ 651 /* Set the Wireless Extension versions */
707 range->we_version_compiled = WIRELESS_EXT; 652 range->we_version_compiled = WIRELESS_EXT;
708 range->we_version_source = 9; /* Nothing for us in v10 and v11 */ 653 range->we_version_source = 9; /* Nothing for us in v10 and v11 */
709#endif /* WIRELESS_EXT > 10 */
710 654
711 /* Set information in the range struct */ 655 /* Set information in the range struct */
712 range->throughput = 450 * 1000; /* don't argue on this ! */ 656 range->throughput = 450 * 1000; /* don't argue on this ! */
@@ -720,16 +664,12 @@ static int netwave_get_range(struct net_device *dev,
720 range->max_qual.level = 255; 664 range->max_qual.level = 255;
721 range->max_qual.noise = 0; 665 range->max_qual.noise = 0;
722 666
723#if WIRELESS_EXT > 7
724 range->num_bitrates = 1; 667 range->num_bitrates = 1;
725 range->bitrate[0] = 1000000; /* 1 Mb/s */ 668 range->bitrate[0] = 1000000; /* 1 Mb/s */
726#endif /* WIRELESS_EXT > 7 */
727 669
728#if WIRELESS_EXT > 8
729 range->encoding_size[0] = 2; /* 16 bits scrambling */ 670 range->encoding_size[0] = 2; /* 16 bits scrambling */
730 range->num_encoding_sizes = 1; 671 range->num_encoding_sizes = 1;
731 range->max_encoding_tokens = 1; /* Only one key possible */ 672 range->max_encoding_tokens = 1; /* Only one key possible */
732#endif /* WIRELESS_EXT > 8 */
733 673
734 return ret; 674 return ret;
735} 675}
@@ -775,8 +715,6 @@ static const struct iw_priv_args netwave_private_args[] = {
775 "getsitesurvey" }, 715 "getsitesurvey" },
776}; 716};
777 717
778#if WIRELESS_EXT > 12
779
780static const iw_handler netwave_handler[] = 718static const iw_handler netwave_handler[] =
781{ 719{
782 NULL, /* SIOCSIWNAME */ 720 NULL, /* SIOCSIWNAME */
@@ -839,131 +777,8 @@ static const struct iw_handler_def netwave_handler_def =
839 .standard = (iw_handler *) netwave_handler, 777 .standard = (iw_handler *) netwave_handler,
840 .private = (iw_handler *) netwave_private_handler, 778 .private = (iw_handler *) netwave_private_handler,
841 .private_args = (struct iw_priv_args *) netwave_private_args, 779 .private_args = (struct iw_priv_args *) netwave_private_args,
842#if WIRELESS_EXT > 16
843 .get_wireless_stats = netwave_get_wireless_stats, 780 .get_wireless_stats = netwave_get_wireless_stats,
844#endif /* WIRELESS_EXT > 16 */
845}; 781};
846#endif /* WIRELESS_EXT > 12 */
847
848/*
849 * Function netwave_ioctl (dev, rq, cmd)
850 *
851 * Perform ioctl : config & info stuff
852 * This is the stuff that are treated the wireless extensions (iwconfig)
853 *
854 */
855static int netwave_ioctl(struct net_device *dev, /* ioctl device */
856 struct ifreq *rq, /* Data passed */
857 int cmd) /* Ioctl number */
858{
859 int ret = 0;
860#ifdef WIRELESS_EXT
861#if WIRELESS_EXT <= 12
862 struct iwreq *wrq = (struct iwreq *) rq;
863#endif
864#endif
865
866 DEBUG(0, "%s: ->netwave_ioctl(cmd=0x%X)\n", dev->name, cmd);
867
868 /* Look what is the request */
869 switch(cmd) {
870 /* --------------- WIRELESS EXTENSIONS --------------- */
871#ifdef WIRELESS_EXT
872#if WIRELESS_EXT <= 12
873 case SIOCGIWNAME:
874 netwave_get_name(dev, NULL, &(wrq->u), NULL);
875 break;
876 case SIOCSIWNWID:
877 ret = netwave_set_nwid(dev, NULL, &(wrq->u), NULL);
878 break;
879 case SIOCGIWNWID:
880 ret = netwave_get_nwid(dev, NULL, &(wrq->u), NULL);
881 break;
882#if WIRELESS_EXT > 8 /* Note : The API did change... */
883 case SIOCGIWENCODE:
884 /* Get scramble key */
885 if(wrq->u.encoding.pointer != (caddr_t) 0)
886 {
887 char key[2];
888 ret = netwave_get_scramble(dev, NULL, &(wrq->u), key);
889 if(copy_to_user(wrq->u.encoding.pointer, key, 2))
890 ret = -EFAULT;
891 }
892 break;
893 case SIOCSIWENCODE:
894 /* Set scramble key */
895 if(wrq->u.encoding.pointer != (caddr_t) 0)
896 {
897 char key[2];
898 if(copy_from_user(key, wrq->u.encoding.pointer, 2))
899 {
900 ret = -EFAULT;
901 break;
902 }
903 ret = netwave_set_scramble(dev, NULL, &(wrq->u), key);
904 }
905 break;
906 case SIOCGIWMODE:
907 /* Mode of operation */
908 ret = netwave_get_mode(dev, NULL, &(wrq->u), NULL);
909 break;
910#else /* WIRELESS_EXT > 8 */
911 case SIOCGIWENCODE:
912 /* Get scramble key */
913 ret = netwave_get_scramble(dev, NULL, &(wrq->u),
914 (char *) &wrq->u.encoding.code);
915 break;
916 case SIOCSIWENCODE:
917 /* Set scramble key */
918 ret = netwave_set_scramble(dev, NULL, &(wrq->u),
919 (char *) &wrq->u.encoding.code);
920 break;
921#endif /* WIRELESS_EXT > 8 */
922 case SIOCGIWRANGE:
923 /* Basic checking... */
924 if(wrq->u.data.pointer != (caddr_t) 0) {
925 struct iw_range range;
926 ret = netwave_get_range(dev, NULL, &(wrq->u), (char *) &range);
927 if (copy_to_user(wrq->u.data.pointer, &range,
928 sizeof(struct iw_range)))
929 ret = -EFAULT;
930 }
931 break;
932 case SIOCGIWPRIV:
933 /* Basic checking... */
934 if(wrq->u.data.pointer != (caddr_t) 0) {
935 /* Set the number of ioctl available */
936 wrq->u.data.length = sizeof(netwave_private_args) / sizeof(netwave_private_args[0]);
937
938 /* Copy structure to the user buffer */
939 if(copy_to_user(wrq->u.data.pointer,
940 (u_char *) netwave_private_args,
941 sizeof(netwave_private_args)))
942 ret = -EFAULT;
943 }
944 break;
945 case SIOCGIPSNAP:
946 if(wrq->u.data.pointer != (caddr_t) 0) {
947 char buffer[sizeof( struct site_survey)];
948 ret = netwave_get_snap(dev, NULL, &(wrq->u), buffer);
949 /* Copy structure to the user buffer */
950 if(copy_to_user(wrq->u.data.pointer,
951 buffer,
952 sizeof( struct site_survey)))
953 {
954 printk(KERN_DEBUG "Bad buffer!\n");
955 break;
956 }
957 }
958 break;
959#endif /* WIRELESS_EXT <= 12 */
960#endif /* WIRELESS_EXT */
961 default:
962 ret = -EOPNOTSUPP;
963 }
964
965 return ret;
966}
967 782
968/* 783/*
969 * Function netwave_pcmcia_config (link) 784 * Function netwave_pcmcia_config (link)
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 15ceaf615756..488ab06fb79f 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -77,30 +77,16 @@
77#define DRIVER_NAME "orinoco" 77#define DRIVER_NAME "orinoco"
78 78
79#include <linux/config.h> 79#include <linux/config.h>
80
81#include <linux/module.h> 80#include <linux/module.h>
82#include <linux/kernel.h> 81#include <linux/kernel.h>
83#include <linux/init.h> 82#include <linux/init.h>
84#include <linux/ptrace.h>
85#include <linux/slab.h>
86#include <linux/string.h>
87#include <linux/timer.h>
88#include <linux/ioport.h>
89#include <linux/netdevice.h> 83#include <linux/netdevice.h>
90#include <linux/if_arp.h>
91#include <linux/etherdevice.h> 84#include <linux/etherdevice.h>
92#include <linux/ethtool.h> 85#include <linux/ethtool.h>
93#include <linux/wireless.h> 86#include <linux/wireless.h>
94#include <net/iw_handler.h> 87#include <net/iw_handler.h>
95#include <net/ieee80211.h> 88#include <net/ieee80211.h>
96 89
97#include <net/ieee80211.h>
98
99#include <asm/uaccess.h>
100#include <asm/io.h>
101#include <asm/system.h>
102
103#include "hermes.h"
104#include "hermes_rid.h" 90#include "hermes_rid.h"
105#include "orinoco.h" 91#include "orinoco.h"
106 92
@@ -137,7 +123,7 @@ MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions");
137 123
138/* We do this this way to avoid ifdefs in the actual code */ 124/* We do this this way to avoid ifdefs in the actual code */
139#ifdef WIRELESS_SPY 125#ifdef WIRELESS_SPY
140#define SPY_NUMBER(priv) (priv->spy_number) 126#define SPY_NUMBER(priv) (priv->spy_data.spy_number)
141#else 127#else
142#define SPY_NUMBER(priv) 0 128#define SPY_NUMBER(priv) 0
143#endif /* WIRELESS_SPY */ 129#endif /* WIRELESS_SPY */
@@ -216,31 +202,32 @@ static struct {
216/********************************************************************/ 202/********************************************************************/
217 203
218/* Used in Event handling. 204/* Used in Event handling.
219 * We avoid nested structres as they break on ARM -- Moustafa */ 205 * We avoid nested structures as they break on ARM -- Moustafa */
220struct hermes_tx_descriptor_802_11 { 206struct hermes_tx_descriptor_802_11 {
221 /* hermes_tx_descriptor */ 207 /* hermes_tx_descriptor */
222 u16 status; 208 __le16 status;
223 u16 reserved1; 209 __le16 reserved1;
224 u16 reserved2; 210 __le16 reserved2;
225 u32 sw_support; 211 __le32 sw_support;
226 u8 retry_count; 212 u8 retry_count;
227 u8 tx_rate; 213 u8 tx_rate;
228 u16 tx_control; 214 __le16 tx_control;
229 215
230 /* ieee802_11_hdr */ 216 /* ieee80211_hdr */
231 u16 frame_ctl; 217 __le16 frame_ctl;
232 u16 duration_id; 218 __le16 duration_id;
233 u8 addr1[ETH_ALEN]; 219 u8 addr1[ETH_ALEN];
234 u8 addr2[ETH_ALEN]; 220 u8 addr2[ETH_ALEN];
235 u8 addr3[ETH_ALEN]; 221 u8 addr3[ETH_ALEN];
236 u16 seq_ctl; 222 __le16 seq_ctl;
237 u8 addr4[ETH_ALEN]; 223 u8 addr4[ETH_ALEN];
238 u16 data_len; 224
225 __le16 data_len;
239 226
240 /* ethhdr */ 227 /* ethhdr */
241 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 228 u8 h_dest[ETH_ALEN]; /* destination eth addr */
242 unsigned char h_source[ETH_ALEN]; /* source ether addr */ 229 u8 h_source[ETH_ALEN]; /* source ether addr */
243 unsigned short h_proto; /* packet type ID field */ 230 __be16 h_proto; /* packet type ID field */
244 231
245 /* p8022_hdr */ 232 /* p8022_hdr */
246 u8 dsap; 233 u8 dsap;
@@ -248,31 +235,31 @@ struct hermes_tx_descriptor_802_11 {
248 u8 ctrl; 235 u8 ctrl;
249 u8 oui[3]; 236 u8 oui[3];
250 237
251 u16 ethertype; 238 __be16 ethertype;
252} __attribute__ ((packed)); 239} __attribute__ ((packed));
253 240
254/* Rx frame header except compatibility 802.3 header */ 241/* Rx frame header except compatibility 802.3 header */
255struct hermes_rx_descriptor { 242struct hermes_rx_descriptor {
256 /* Control */ 243 /* Control */
257 u16 status; 244 __le16 status;
258 u32 time; 245 __le32 time;
259 u8 silence; 246 u8 silence;
260 u8 signal; 247 u8 signal;
261 u8 rate; 248 u8 rate;
262 u8 rxflow; 249 u8 rxflow;
263 u32 reserved; 250 __le32 reserved;
264 251
265 /* 802.11 header */ 252 /* 802.11 header */
266 u16 frame_ctl; 253 __le16 frame_ctl;
267 u16 duration_id; 254 __le16 duration_id;
268 u8 addr1[ETH_ALEN]; 255 u8 addr1[ETH_ALEN];
269 u8 addr2[ETH_ALEN]; 256 u8 addr2[ETH_ALEN];
270 u8 addr3[ETH_ALEN]; 257 u8 addr3[ETH_ALEN];
271 u16 seq_ctl; 258 __le16 seq_ctl;
272 u8 addr4[ETH_ALEN]; 259 u8 addr4[ETH_ALEN];
273 260
274 /* Data length */ 261 /* Data length */
275 u16 data_len; 262 __le16 data_len;
276} __attribute__ ((packed)); 263} __attribute__ ((packed));
277 264
278/********************************************************************/ 265/********************************************************************/
@@ -396,14 +383,14 @@ static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev)
396 /* If a spy address is defined, we report stats of the 383 /* If a spy address is defined, we report stats of the
397 * first spy address - Jean II */ 384 * first spy address - Jean II */
398 if (SPY_NUMBER(priv)) { 385 if (SPY_NUMBER(priv)) {
399 wstats->qual.qual = priv->spy_stat[0].qual; 386 wstats->qual.qual = priv->spy_data.spy_stat[0].qual;
400 wstats->qual.level = priv->spy_stat[0].level; 387 wstats->qual.level = priv->spy_data.spy_stat[0].level;
401 wstats->qual.noise = priv->spy_stat[0].noise; 388 wstats->qual.noise = priv->spy_data.spy_stat[0].noise;
402 wstats->qual.updated = priv->spy_stat[0].updated; 389 wstats->qual.updated = priv->spy_data.spy_stat[0].updated;
403 } 390 }
404 } else { 391 } else {
405 struct { 392 struct {
406 u16 qual, signal, noise; 393 __le16 qual, signal, noise;
407 } __attribute__ ((packed)) cq; 394 } __attribute__ ((packed)) cq;
408 395
409 err = HERMES_READ_RECORD(hw, USER_BAP, 396 err = HERMES_READ_RECORD(hw, USER_BAP,
@@ -503,13 +490,12 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
503 return 0; 490 return 0;
504 } 491 }
505 492
506 /* Check packet length, pad short packets, round up odd length */ 493 /* Length of the packet body */
494 /* FIXME: what if the skb is smaller than this? */
507 len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN); 495 len = max_t(int, ALIGN(skb->len, 2), ETH_ZLEN);
508 if (skb->len < len) { 496 skb = skb_padto(skb, len);
509 skb = skb_padto(skb, len); 497 if (skb == NULL)
510 if (skb == NULL) 498 goto fail;
511 goto fail;
512 }
513 len -= ETH_HLEN; 499 len -= ETH_HLEN;
514 500
515 eh = (struct ethhdr *)skb->data; 501 eh = (struct ethhdr *)skb->data;
@@ -556,13 +542,21 @@ static int orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
556 stats->tx_errors++; 542 stats->tx_errors++;
557 goto fail; 543 goto fail;
558 } 544 }
545 /* Actual xfer length - allow for padding */
546 len = ALIGN(data_len, 2);
547 if (len < ETH_ZLEN - ETH_HLEN)
548 len = ETH_ZLEN - ETH_HLEN;
559 } else { /* IEEE 802.3 frame */ 549 } else { /* IEEE 802.3 frame */
560 data_len = len + ETH_HLEN; 550 data_len = len + ETH_HLEN;
561 data_off = HERMES_802_3_OFFSET; 551 data_off = HERMES_802_3_OFFSET;
562 p = skb->data; 552 p = skb->data;
553 /* Actual xfer length - round up for odd length packets */
554 len = ALIGN(data_len, 2);
555 if (len < ETH_ZLEN)
556 len = ETH_ZLEN;
563 } 557 }
564 558
565 err = hermes_bap_pwrite(hw, USER_BAP, p, data_len, 559 err = hermes_bap_pwrite_pad(hw, USER_BAP, p, data_len, len,
566 txfid, data_off); 560 txfid, data_off);
567 if (err) { 561 if (err) {
568 printk(KERN_ERR "%s: Error %d writing packet to BAP\n", 562 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
@@ -634,16 +628,17 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
634 struct orinoco_private *priv = netdev_priv(dev); 628 struct orinoco_private *priv = netdev_priv(dev);
635 struct net_device_stats *stats = &priv->stats; 629 struct net_device_stats *stats = &priv->stats;
636 u16 fid = hermes_read_regn(hw, TXCOMPLFID); 630 u16 fid = hermes_read_regn(hw, TXCOMPLFID);
631 u16 status;
637 struct hermes_tx_descriptor_802_11 hdr; 632 struct hermes_tx_descriptor_802_11 hdr;
638 int err = 0; 633 int err = 0;
639 634
640 if (fid == DUMMY_FID) 635 if (fid == DUMMY_FID)
641 return; /* Nothing's really happened */ 636 return; /* Nothing's really happened */
642 637
643 /* Read the frame header */ 638 /* Read part of the frame header - we need status and addr1 */
644 err = hermes_bap_pread(hw, IRQ_BAP, &hdr, 639 err = hermes_bap_pread(hw, IRQ_BAP, &hdr,
645 sizeof(struct hermes_tx_descriptor) + 640 offsetof(struct hermes_tx_descriptor_802_11,
646 sizeof(struct ieee80211_hdr), 641 addr2),
647 fid, 0); 642 fid, 0);
648 643
649 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); 644 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
@@ -663,8 +658,8 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
663 * exceeded, because that's the only status that really mean 658 * exceeded, because that's the only status that really mean
664 * that this particular node went away. 659 * that this particular node went away.
665 * Other errors means that *we* screwed up. - Jean II */ 660 * Other errors means that *we* screwed up. - Jean II */
666 hdr.status = le16_to_cpu(hdr.status); 661 status = le16_to_cpu(hdr.status);
667 if (hdr.status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) { 662 if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) {
668 union iwreq_data wrqu; 663 union iwreq_data wrqu;
669 664
670 /* Copy 802.11 dest address. 665 /* Copy 802.11 dest address.
@@ -723,18 +718,13 @@ static inline int is_ethersnap(void *_hdr)
723static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac, 718static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac,
724 int level, int noise) 719 int level, int noise)
725{ 720{
726 struct orinoco_private *priv = netdev_priv(dev); 721 struct iw_quality wstats;
727 int i; 722 wstats.level = level - 0x95;
728 723 wstats.noise = noise - 0x95;
729 /* Gather wireless spy statistics: for each packet, compare the 724 wstats.qual = (level > noise) ? (level - noise) : 0;
730 * source address with out list, and if match, get the stats... */ 725 wstats.updated = 7;
731 for (i = 0; i < priv->spy_number; i++) 726 /* Update spy records */
732 if (!memcmp(mac, priv->spy_address[i], ETH_ALEN)) { 727 wireless_spy_update(dev, mac, &wstats);
733 priv->spy_stat[i].level = level - 0x95;
734 priv->spy_stat[i].noise = noise - 0x95;
735 priv->spy_stat[i].qual = (level > noise) ? (level - noise) : 0;
736 priv->spy_stat[i].updated = 7;
737 }
738} 728}
739 729
740static void orinoco_stat_gather(struct net_device *dev, 730static void orinoco_stat_gather(struct net_device *dev,
@@ -1055,7 +1045,7 @@ static void orinoco_join_ap(struct net_device *dev)
1055 unsigned long flags; 1045 unsigned long flags;
1056 struct join_req { 1046 struct join_req {
1057 u8 bssid[ETH_ALEN]; 1047 u8 bssid[ETH_ALEN];
1058 u16 channel; 1048 __le16 channel;
1059 } __attribute__ ((packed)) req; 1049 } __attribute__ ((packed)) req;
1060 const int atom_len = offsetof(struct prism2_scan_apinfo, atim); 1050 const int atom_len = offsetof(struct prism2_scan_apinfo, atim);
1061 struct prism2_scan_apinfo *atom = NULL; 1051 struct prism2_scan_apinfo *atom = NULL;
@@ -1070,7 +1060,7 @@ static void orinoco_join_ap(struct net_device *dev)
1070 return; 1060 return;
1071 1061
1072 if (orinoco_lock(priv, &flags) != 0) 1062 if (orinoco_lock(priv, &flags) != 0)
1073 goto out; 1063 goto fail_lock;
1074 1064
1075 /* Sanity checks in case user changed something in the meantime */ 1065 /* Sanity checks in case user changed something in the meantime */
1076 if (! priv->bssid_fixed) 1066 if (! priv->bssid_fixed)
@@ -1115,8 +1105,10 @@ static void orinoco_join_ap(struct net_device *dev)
1115 printk(KERN_ERR "%s: Error issuing join request\n", dev->name); 1105 printk(KERN_ERR "%s: Error issuing join request\n", dev->name);
1116 1106
1117 out: 1107 out:
1118 kfree(buf);
1119 orinoco_unlock(priv, &flags); 1108 orinoco_unlock(priv, &flags);
1109
1110 fail_lock:
1111 kfree(buf);
1120} 1112}
1121 1113
1122/* Send new BSSID to userspace */ 1114/* Send new BSSID to userspace */
@@ -1134,12 +1126,14 @@ static void orinoco_send_wevents(struct net_device *dev)
1134 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID, 1126 err = hermes_read_ltv(hw, IRQ_BAP, HERMES_RID_CURRENTBSSID,
1135 ETH_ALEN, NULL, wrqu.ap_addr.sa_data); 1127 ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
1136 if (err != 0) 1128 if (err != 0)
1137 return; 1129 goto out;
1138 1130
1139 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1131 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1140 1132
1141 /* Send event to user space */ 1133 /* Send event to user space */
1142 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 1134 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
1135
1136 out:
1143 orinoco_unlock(priv, &flags); 1137 orinoco_unlock(priv, &flags);
1144} 1138}
1145 1139
@@ -1148,8 +1142,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1148 struct orinoco_private *priv = netdev_priv(dev); 1142 struct orinoco_private *priv = netdev_priv(dev);
1149 u16 infofid; 1143 u16 infofid;
1150 struct { 1144 struct {
1151 u16 len; 1145 __le16 len;
1152 u16 type; 1146 __le16 type;
1153 } __attribute__ ((packed)) info; 1147 } __attribute__ ((packed)) info;
1154 int len, type; 1148 int len, type;
1155 int err; 1149 int err;
@@ -2464,6 +2458,10 @@ struct net_device *alloc_orinocodev(int sizeof_card,
2464 dev->get_stats = orinoco_get_stats; 2458 dev->get_stats = orinoco_get_stats;
2465 dev->ethtool_ops = &orinoco_ethtool_ops; 2459 dev->ethtool_ops = &orinoco_ethtool_ops;
2466 dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def; 2460 dev->wireless_handlers = (struct iw_handler_def *)&orinoco_handler_def;
2461#ifdef WIRELESS_SPY
2462 priv->wireless_data.spy_data = &priv->spy_data;
2463 dev->wireless_data = &priv->wireless_data;
2464#endif
2467 dev->change_mtu = orinoco_change_mtu; 2465 dev->change_mtu = orinoco_change_mtu;
2468 dev->set_multicast_list = orinoco_set_multicast_list; 2466 dev->set_multicast_list = orinoco_set_multicast_list;
2469 /* we use the default eth_mac_addr for setting the MAC addr */ 2467 /* we use the default eth_mac_addr for setting the MAC addr */
@@ -2835,7 +2833,7 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
2835 } 2833 }
2836 } 2834 }
2837 2835
2838 if ((priv->iw_mode == IW_MODE_ADHOC) && (priv->spy_number == 0)){ 2836 if ((priv->iw_mode == IW_MODE_ADHOC) && (!SPY_NUMBER(priv))){
2839 /* Quality stats meaningless in ad-hoc mode */ 2837 /* Quality stats meaningless in ad-hoc mode */
2840 } else { 2838 } else {
2841 range->max_qual.qual = 0x8b - 0x2f; 2839 range->max_qual.qual = 0x8b - 0x2f;
@@ -2882,6 +2880,14 @@ static int orinoco_ioctl_getiwrange(struct net_device *dev,
2882 range->min_r_time = 0; 2880 range->min_r_time = 0;
2883 range->max_r_time = 65535 * 1000; /* ??? */ 2881 range->max_r_time = 65535 * 1000; /* ??? */
2884 2882
2883 /* Event capability (kernel) */
2884 IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
2885 /* Event capability (driver) */
2886 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
2887 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
2888 IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
2889 IW_EVENT_CAPA_SET(range->event_capa, IWEVTXDROP);
2890
2885 TRACE_EXIT(dev->name); 2891 TRACE_EXIT(dev->name);
2886 2892
2887 return 0; 2893 return 0;
@@ -3841,92 +3847,6 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
3841 return err; 3847 return err;
3842} 3848}
3843 3849
3844/* Spy is used for link quality/strength measurements in Ad-Hoc mode
3845 * Jean II */
3846static int orinoco_ioctl_setspy(struct net_device *dev,
3847 struct iw_request_info *info,
3848 struct iw_point *srq,
3849 char *extra)
3850
3851{
3852 struct orinoco_private *priv = netdev_priv(dev);
3853 struct sockaddr *address = (struct sockaddr *) extra;
3854 int number = srq->length;
3855 int i;
3856 unsigned long flags;
3857
3858 /* Make sure nobody mess with the structure while we do */
3859 if (orinoco_lock(priv, &flags) != 0)
3860 return -EBUSY;
3861
3862 /* orinoco_lock() doesn't disable interrupts, so make sure the
3863 * interrupt rx path don't get confused while we copy */
3864 priv->spy_number = 0;
3865
3866 if (number > 0) {
3867 /* Extract the addresses */
3868 for (i = 0; i < number; i++)
3869 memcpy(priv->spy_address[i], address[i].sa_data,
3870 ETH_ALEN);
3871 /* Reset stats */
3872 memset(priv->spy_stat, 0,
3873 sizeof(struct iw_quality) * IW_MAX_SPY);
3874 /* Set number of addresses */
3875 priv->spy_number = number;
3876 }
3877
3878 /* Now, let the others play */
3879 orinoco_unlock(priv, &flags);
3880
3881 /* Do NOT call commit handler */
3882 return 0;
3883}
3884
3885static int orinoco_ioctl_getspy(struct net_device *dev,
3886 struct iw_request_info *info,
3887 struct iw_point *srq,
3888 char *extra)
3889{
3890 struct orinoco_private *priv = netdev_priv(dev);
3891 struct sockaddr *address = (struct sockaddr *) extra;
3892 int number;
3893 int i;
3894 unsigned long flags;
3895
3896 if (orinoco_lock(priv, &flags) != 0)
3897 return -EBUSY;
3898
3899 number = priv->spy_number;
3900 /* Create address struct */
3901 for (i = 0; i < number; i++) {
3902 memcpy(address[i].sa_data, priv->spy_address[i], ETH_ALEN);
3903 address[i].sa_family = AF_UNIX;
3904 }
3905 if (number > 0) {
3906 /* Create address struct */
3907 for (i = 0; i < number; i++) {
3908 memcpy(address[i].sa_data, priv->spy_address[i],
3909 ETH_ALEN);
3910 address[i].sa_family = AF_UNIX;
3911 }
3912 /* Copy stats */
3913 /* In theory, we should disable irqs while copying the stats
3914 * because the rx path might update it in the middle...
3915 * Bah, who care ? - Jean II */
3916 memcpy(extra + (sizeof(struct sockaddr) * number),
3917 priv->spy_stat, sizeof(struct iw_quality) * number);
3918 }
3919 /* Reset updated flags. */
3920 for (i = 0; i < number; i++)
3921 priv->spy_stat[i].updated = 0;
3922
3923 orinoco_unlock(priv, &flags);
3924
3925 srq->length = number;
3926
3927 return 0;
3928}
3929
3930/* Trigger a scan (look for other cells in the vicinity */ 3850/* Trigger a scan (look for other cells in the vicinity */
3931static int orinoco_ioctl_setscan(struct net_device *dev, 3851static int orinoco_ioctl_setscan(struct net_device *dev,
3932 struct iw_request_info *info, 3852 struct iw_request_info *info,
@@ -3999,7 +3919,7 @@ static int orinoco_ioctl_setscan(struct net_device *dev,
3999 HERMES_HOSTSCAN_SYMBOL_BCAST); 3919 HERMES_HOSTSCAN_SYMBOL_BCAST);
4000 break; 3920 break;
4001 case FIRMWARE_TYPE_INTERSIL: { 3921 case FIRMWARE_TYPE_INTERSIL: {
4002 u16 req[3]; 3922 __le16 req[3];
4003 3923
4004 req[0] = cpu_to_le16(0x3fff); /* All channels */ 3924 req[0] = cpu_to_le16(0x3fff); /* All channels */
4005 req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */ 3925 req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */
@@ -4073,7 +3993,7 @@ static inline int orinoco_translate_scan(struct net_device *dev,
4073 case FIRMWARE_TYPE_INTERSIL: 3993 case FIRMWARE_TYPE_INTERSIL:
4074 offset = 4; 3994 offset = 4;
4075 if (priv->has_hostscan) { 3995 if (priv->has_hostscan) {
4076 atom_len = le16_to_cpup((u16 *)scan); 3996 atom_len = le16_to_cpup((__le16 *)scan);
4077 /* Sanity check for atom_len */ 3997 /* Sanity check for atom_len */
4078 if (atom_len < sizeof(struct prism2_scan_apinfo)) { 3998 if (atom_len < sizeof(struct prism2_scan_apinfo)) {
4079 printk(KERN_ERR "%s: Invalid atom_len in scan data: %d\n", 3999 printk(KERN_ERR "%s: Invalid atom_len in scan data: %d\n",
@@ -4357,8 +4277,10 @@ static const iw_handler orinoco_handler[] = {
4357 [SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens, 4277 [SIOCSIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
4358 [SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens, 4278 [SIOCGIWSENS -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
4359 [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange, 4279 [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
4360 [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy, 4280 [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
4361 [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy, 4281 [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
4282 [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
4283 [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
4362 [SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap, 4284 [SIOCSIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
4363 [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap, 4285 [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
4364 [SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan, 4286 [SIOCSIWSCAN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index 2f213a7103fe..7a17bb31fc89 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -7,12 +7,11 @@
7#ifndef _ORINOCO_H 7#ifndef _ORINOCO_H
8#define _ORINOCO_H 8#define _ORINOCO_H
9 9
10#define DRIVER_VERSION "0.15rc2" 10#define DRIVER_VERSION "0.15rc3"
11 11
12#include <linux/types.h>
13#include <linux/spinlock.h>
14#include <linux/netdevice.h> 12#include <linux/netdevice.h>
15#include <linux/wireless.h> 13#include <linux/wireless.h>
14#include <net/iw_handler.h>
16#include <linux/version.h> 15#include <linux/version.h>
17 16
18#include "hermes.h" 17#include "hermes.h"
@@ -28,7 +27,7 @@
28#define ORINOCO_MAX_KEYS 4 27#define ORINOCO_MAX_KEYS 4
29 28
30struct orinoco_key { 29struct orinoco_key {
31 u16 len; /* always stored as little-endian */ 30 __le16 len; /* always stored as little-endian */
32 char data[ORINOCO_MAX_KEY_SIZE]; 31 char data[ORINOCO_MAX_KEY_SIZE];
33} __attribute__ ((packed)); 32} __attribute__ ((packed));
34 33
@@ -36,14 +35,14 @@ struct header_struct {
36 /* 802.3 */ 35 /* 802.3 */
37 u8 dest[ETH_ALEN]; 36 u8 dest[ETH_ALEN];
38 u8 src[ETH_ALEN]; 37 u8 src[ETH_ALEN];
39 u16 len; 38 __be16 len;
40 /* 802.2 */ 39 /* 802.2 */
41 u8 dsap; 40 u8 dsap;
42 u8 ssap; 41 u8 ssap;
43 u8 ctrl; 42 u8 ctrl;
44 /* SNAP */ 43 /* SNAP */
45 u8 oui[3]; 44 u8 oui[3];
46 u16 ethertype; 45 unsigned short ethertype;
47} __attribute__ ((packed)); 46} __attribute__ ((packed));
48 47
49typedef enum { 48typedef enum {
@@ -112,9 +111,8 @@ struct orinoco_private {
112 u16 pm_on, pm_mcast, pm_period, pm_timeout; 111 u16 pm_on, pm_mcast, pm_period, pm_timeout;
113 u16 preamble; 112 u16 preamble;
114#ifdef WIRELESS_SPY 113#ifdef WIRELESS_SPY
115 int spy_number; 114 struct iw_spy_data spy_data; /* iwspy support */
116 u_char spy_address[IW_MAX_SPY][ETH_ALEN]; 115 struct iw_public_data wireless_data;
117 struct iw_quality spy_stat[IW_MAX_SPY];
118#endif 116#endif
119 117
120 /* Configuration dependent variables */ 118 /* Configuration dependent variables */
diff --git a/drivers/net/wireless/orinoco_cs.c b/drivers/net/wireless/orinoco_cs.c
index bedd7f9f23e4..dc1128a00971 100644
--- a/drivers/net/wireless/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco_cs.c
@@ -14,33 +14,16 @@
14#define PFX DRIVER_NAME ": " 14#define PFX DRIVER_NAME ": "
15 15
16#include <linux/config.h> 16#include <linux/config.h>
17#ifdef __IN_PCMCIA_PACKAGE__
18#include <pcmcia/k_compat.h>
19#endif /* __IN_PCMCIA_PACKAGE__ */
20
21#include <linux/module.h> 17#include <linux/module.h>
22#include <linux/kernel.h> 18#include <linux/kernel.h>
23#include <linux/init.h> 19#include <linux/init.h>
24#include <linux/sched.h> 20#include <linux/delay.h>
25#include <linux/ptrace.h>
26#include <linux/slab.h>
27#include <linux/string.h>
28#include <linux/ioport.h>
29#include <linux/netdevice.h>
30#include <linux/if_arp.h>
31#include <linux/etherdevice.h>
32#include <linux/wireless.h>
33
34#include <pcmcia/cs_types.h> 21#include <pcmcia/cs_types.h>
35#include <pcmcia/cs.h> 22#include <pcmcia/cs.h>
36#include <pcmcia/cistpl.h> 23#include <pcmcia/cistpl.h>
37#include <pcmcia/cisreg.h> 24#include <pcmcia/cisreg.h>
38#include <pcmcia/ds.h> 25#include <pcmcia/ds.h>
39 26
40#include <asm/uaccess.h>
41#include <asm/io.h>
42#include <asm/system.h>
43
44#include "orinoco.h" 27#include "orinoco.h"
45 28
46/********************************************************************/ 29/********************************************************************/
@@ -97,17 +80,8 @@ static dev_link_t *dev_list; /* = NULL */
97/* Function prototypes */ 80/* Function prototypes */
98/********************************************************************/ 81/********************************************************************/
99 82
100/* device methods */ 83static void orinoco_cs_release(dev_link_t *link);
101static int orinoco_cs_hard_reset(struct orinoco_private *priv); 84static void orinoco_cs_detach(dev_link_t *link);
102
103/* PCMCIA gumpf */
104static void orinoco_cs_config(dev_link_t * link);
105static void orinoco_cs_release(dev_link_t * link);
106static int orinoco_cs_event(event_t event, int priority,
107 event_callback_args_t * args);
108
109static dev_link_t *orinoco_cs_attach(void);
110static void orinoco_cs_detach(dev_link_t *);
111 85
112/********************************************************************/ 86/********************************************************************/
113/* Device methods */ 87/* Device methods */
@@ -603,49 +577,85 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
603 "Pavel Roskin <proski@gnu.org>, et al)"; 577 "Pavel Roskin <proski@gnu.org>, et al)";
604 578
605static struct pcmcia_device_id orinoco_cs_ids[] = { 579static struct pcmcia_device_id orinoco_cs_ids[] = {
606 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), 580 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
607 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), 581 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
608 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), 582 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
609 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), 583 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
610 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), 584 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
611 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), 585 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
612 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), 586 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
613 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), 587 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
614 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), 588 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
615 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), 589 PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
616 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), 590 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
617 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), 591 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
618 PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), 592 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
619 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), 593 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
620 PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), 594 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
621 PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), 595 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
622 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), 596 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
623 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), 597 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
598 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
599 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
600 PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
601 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
602 PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
603 PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
604 PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
605 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
606 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
607 PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
624 PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3), 608 PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
625 PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
626 PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5), 609 PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
610 PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
611 PCMCIA_DEVICE_PROD_ID123("AIRVAST", "IEEE 802.11b Wireless PCMCIA Card", "HFA3863", 0xea569531, 0x4bcb9645, 0x355cb092),
612 PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
613 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
614 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
627 PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169), 615 PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
616 PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
628 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3), 617 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
618 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
629 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90), 619 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
620 PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
621 PCMCIA_DEVICE_PROD_ID123("corega", "WL PCCL-11", "ISL37300P", 0x0a21501a, 0x59868926, 0xc9049a39),
630 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584), 622 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
631 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9), 623 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
632 PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae), 624 PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
633 PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac), 625 PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
634 PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab), 626 PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
627 PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
628 PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
635 PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3), 629 PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
636 PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c), 630 PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
631 PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
632 PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
637 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18), 633 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
634 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
635 PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
636 PCMCIA_DEVICE_PROD_ID123("Intersil", "PRISM Freedom PCMCIA Adapter", "ISL37100P", 0x4b801a17, 0xf222ec2d, 0x630d52b2),
637 PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
638 PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
638 PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a), 639 PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
639 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410), 640 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
640 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3), 641 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
641 PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01), 642 PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
642 PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a), 643 PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
644 PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
643 PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1), 645 PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
646 PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
647 PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
648 PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
649 PCMCIA_DEVICE_PROD_ID123("PCMCIA", "11M WLAN Card v2.5", "ISL37300P", 0x281f1c5d, 0x6e440487, 0xc9049a39),
644 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264), 650 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
651 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
645 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9), 652 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
646 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26), 653 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
647 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b), 654 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
648 PCMCIA_DEVICE_PROD_ID1("Symbol Technologies", 0x3f02b4d6), 655 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
656 PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
657 PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39),
658 PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
649 PCMCIA_DEVICE_NULL, 659 PCMCIA_DEVICE_NULL,
650}; 660};
651MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids); 661MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
@@ -656,8 +666,8 @@ static struct pcmcia_driver orinoco_driver = {
656 .name = DRIVER_NAME, 666 .name = DRIVER_NAME,
657 }, 667 },
658 .attach = orinoco_cs_attach, 668 .attach = orinoco_cs_attach,
659 .event = orinoco_cs_event,
660 .detach = orinoco_cs_detach, 669 .detach = orinoco_cs_detach,
670 .event = orinoco_cs_event,
661 .id_table = orinoco_cs_ids, 671 .id_table = orinoco_cs_ids,
662}; 672};
663 673
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index 86fa58e5cfac..d8afd51ff8a5 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -40,29 +40,13 @@
40#define PFX DRIVER_NAME ": " 40#define PFX DRIVER_NAME ": "
41 41
42#include <linux/config.h> 42#include <linux/config.h>
43
44#include <linux/module.h> 43#include <linux/module.h>
45#include <linux/kernel.h> 44#include <linux/kernel.h>
46#include <linux/init.h> 45#include <linux/init.h>
47#include <linux/sched.h> 46#include <linux/delay.h>
48#include <linux/ptrace.h>
49#include <linux/slab.h>
50#include <linux/string.h>
51#include <linux/timer.h>
52#include <linux/ioport.h>
53#include <asm/uaccess.h>
54#include <asm/io.h>
55#include <asm/system.h>
56#include <linux/netdevice.h>
57#include <linux/if_arp.h>
58#include <linux/etherdevice.h>
59#include <linux/list.h>
60#include <linux/pci.h> 47#include <linux/pci.h>
61#include <linux/fcntl.h>
62
63#include <pcmcia/cisreg.h> 48#include <pcmcia/cisreg.h>
64 49
65#include "hermes.h"
66#include "orinoco.h" 50#include "orinoco.h"
67 51
68#define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */ 52#define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */
@@ -108,7 +92,7 @@ static int nortel_pci_cor_reset(struct orinoco_private *priv)
108 return 0; 92 return 0;
109} 93}
110 94
111int nortel_pci_hw_init(struct nortel_pci_card *card) 95static int nortel_pci_hw_init(struct nortel_pci_card *card)
112{ 96{
113 int i; 97 int i;
114 u32 reg; 98 u32 reg;
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 42e03438291b..5362c214fc8e 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -93,28 +93,12 @@
93#define PFX DRIVER_NAME ": " 93#define PFX DRIVER_NAME ": "
94 94
95#include <linux/config.h> 95#include <linux/config.h>
96
97#include <linux/module.h> 96#include <linux/module.h>
98#include <linux/kernel.h> 97#include <linux/kernel.h>
99#include <linux/init.h> 98#include <linux/init.h>
100#include <linux/sched.h> 99#include <linux/delay.h>
101#include <linux/ptrace.h>
102#include <linux/slab.h>
103#include <linux/string.h>
104#include <linux/timer.h>
105#include <linux/ioport.h>
106#include <linux/netdevice.h>
107#include <linux/if_arp.h>
108#include <linux/etherdevice.h>
109#include <linux/list.h>
110#include <linux/pci.h> 100#include <linux/pci.h>
111#include <linux/fcntl.h>
112
113#include <asm/uaccess.h>
114#include <asm/io.h>
115#include <asm/system.h>
116 101
117#include "hermes.h"
118#include "orinoco.h" 102#include "orinoco.h"
119 103
120/* All the magic there is from wlan-ng */ 104/* All the magic there is from wlan-ng */
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 7ab05b89fb3f..210e73776545 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -117,29 +117,13 @@
117#define PFX DRIVER_NAME ": " 117#define PFX DRIVER_NAME ": "
118 118
119#include <linux/config.h> 119#include <linux/config.h>
120
121#include <linux/module.h> 120#include <linux/module.h>
122#include <linux/kernel.h> 121#include <linux/kernel.h>
123#include <linux/init.h> 122#include <linux/init.h>
124#include <linux/sched.h> 123#include <linux/delay.h>
125#include <linux/ptrace.h>
126#include <linux/slab.h>
127#include <linux/string.h>
128#include <linux/timer.h>
129#include <linux/ioport.h>
130#include <asm/uaccess.h>
131#include <asm/io.h>
132#include <asm/system.h>
133#include <linux/netdevice.h>
134#include <linux/if_arp.h>
135#include <linux/etherdevice.h>
136#include <linux/list.h>
137#include <linux/pci.h> 124#include <linux/pci.h>
138#include <linux/fcntl.h>
139
140#include <pcmcia/cisreg.h> 125#include <pcmcia/cisreg.h>
141 126
142#include "hermes.h"
143#include "orinoco.h" 127#include "orinoco.h"
144 128
145#define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */ 129#define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index 85893f42445b..5e68b7026186 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -53,29 +53,13 @@
53#define PFX DRIVER_NAME ": " 53#define PFX DRIVER_NAME ": "
54 54
55#include <linux/config.h> 55#include <linux/config.h>
56
57#include <linux/module.h> 56#include <linux/module.h>
58#include <linux/kernel.h> 57#include <linux/kernel.h>
59#include <linux/init.h> 58#include <linux/init.h>
60#include <linux/sched.h> 59#include <linux/delay.h>
61#include <linux/ptrace.h>
62#include <linux/slab.h>
63#include <linux/string.h>
64#include <linux/timer.h>
65#include <linux/ioport.h>
66#include <asm/uaccess.h>
67#include <asm/io.h>
68#include <asm/system.h>
69#include <linux/netdevice.h>
70#include <linux/if_arp.h>
71#include <linux/etherdevice.h>
72#include <linux/list.h>
73#include <linux/pci.h> 60#include <linux/pci.h>
74#include <linux/fcntl.h>
75
76#include <pcmcia/cisreg.h> 61#include <pcmcia/cisreg.h>
77 62
78#include "hermes.h"
79#include "orinoco.h" 63#include "orinoco.h"
80 64
81#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ 65#define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 9a8790e3580c..5c1a1adf1ff8 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -462,14 +462,12 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
462 /* txpower is supported in dBm's */ 462 /* txpower is supported in dBm's */
463 range->txpower_capa = IW_TXPOW_DBM; 463 range->txpower_capa = IW_TXPOW_DBM;
464 464
465#if WIRELESS_EXT > 16
466 /* Event capability (kernel + driver) */ 465 /* Event capability (kernel + driver) */
467 range->event_capa[0] = (IW_EVENT_CAPA_K_0 | 466 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
468 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | 467 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
469 IW_EVENT_CAPA_MASK(SIOCGIWAP)); 468 IW_EVENT_CAPA_MASK(SIOCGIWAP));
470 range->event_capa[1] = IW_EVENT_CAPA_K_1; 469 range->event_capa[1] = IW_EVENT_CAPA_K_1;
471 range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM); 470 range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);
472#endif /* WIRELESS_EXT > 16 */
473 471
474 if (islpci_get_state(priv) < PRV_STATE_INIT) 472 if (islpci_get_state(priv) < PRV_STATE_INIT)
475 return 0; 473 return 0;
@@ -693,14 +691,13 @@ prism54_get_scan(struct net_device *ndev, struct iw_request_info *info,
693 extra + dwrq->length, 691 extra + dwrq->length,
694 &(bsslist->bsslist[i]), 692 &(bsslist->bsslist[i]),
695 noise); 693 noise);
696#if WIRELESS_EXT > 16 694
697 /* Check if there is space for one more entry */ 695 /* Check if there is space for one more entry */
698 if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) { 696 if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
699 /* Ask user space to try again with a bigger buffer */ 697 /* Ask user space to try again with a bigger buffer */
700 rvalue = -E2BIG; 698 rvalue = -E2BIG;
701 break; 699 break;
702 } 700 }
703#endif /* WIRELESS_EXT > 16 */
704 } 701 }
705 702
706 kfree(bsslist); 703 kfree(bsslist);
@@ -2727,12 +2724,7 @@ const struct iw_handler_def prism54_handler_def = {
2727 .standard = (iw_handler *) prism54_handler, 2724 .standard = (iw_handler *) prism54_handler,
2728 .private = (iw_handler *) prism54_private_handler, 2725 .private = (iw_handler *) prism54_private_handler,
2729 .private_args = (struct iw_priv_args *) prism54_private_args, 2726 .private_args = (struct iw_priv_args *) prism54_private_args,
2730#if WIRELESS_EXT > 16
2731 .get_wireless_stats = prism54_get_wireless_stats, 2727 .get_wireless_stats = prism54_get_wireless_stats,
2732#endif /* WIRELESS_EXT > 16 */
2733#if WIRELESS_EXT == 16
2734 .spy_offset = offsetof(islpci_private, spy_data),
2735#endif /* WIRELESS_EXT == 16 */
2736}; 2728};
2737 2729
2738/* For wpa_supplicant */ 2730/* For wpa_supplicant */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 6f13d4a8e2d3..78bdb359835e 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -439,8 +439,7 @@ prism54_bring_down(islpci_private *priv)
439 wmb(); 439 wmb();
440 440
441 /* wait a while for the device to reset */ 441 /* wait a while for the device to reset */
442 set_current_state(TASK_UNINTERRUPTIBLE); 442 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
443 schedule_timeout(50*HZ/1000);
444 443
445 return 0; 444 return 0;
446} 445}
@@ -491,8 +490,7 @@ islpci_reset_if(islpci_private *priv)
491 /* The software reset acknowledge needs about 220 msec here. 490 /* The software reset acknowledge needs about 220 msec here.
492 * Be conservative and wait for up to one second. */ 491 * Be conservative and wait for up to one second. */
493 492
494 set_current_state(TASK_UNINTERRUPTIBLE); 493 remaining = schedule_timeout_uninterruptible(HZ);
495 remaining = schedule_timeout(HZ);
496 494
497 if(remaining > 0) { 495 if(remaining > 0) {
498 result = 0; 496 result = 0;
@@ -756,8 +754,7 @@ islpci_free_memory(islpci_private *priv)
756 pci_unmap_single(priv->pdev, buf->pci_addr, 754 pci_unmap_single(priv->pdev, buf->pci_addr,
757 buf->size, PCI_DMA_FROMDEVICE); 755 buf->size, PCI_DMA_FROMDEVICE);
758 buf->pci_addr = 0; 756 buf->pci_addr = 0;
759 if (buf->mem) 757 kfree(buf->mem);
760 kfree(buf->mem);
761 buf->size = 0; 758 buf->size = 0;
762 buf->mem = NULL; 759 buf->mem = NULL;
763 } 760 }
@@ -839,13 +836,9 @@ islpci_setup(struct pci_dev *pdev)
839 priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ? 836 priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
840 priv->monitor_type : ARPHRD_ETHER; 837 priv->monitor_type : ARPHRD_ETHER;
841 838
842#if WIRELESS_EXT > 16
843 /* Add pointers to enable iwspy support. */ 839 /* Add pointers to enable iwspy support. */
844 priv->wireless_data.spy_data = &priv->spy_data; 840 priv->wireless_data.spy_data = &priv->spy_data;
845 ndev->wireless_data = &priv->wireless_data; 841 ndev->wireless_data = &priv->wireless_data;
846#else /* WIRELESS_EXT > 16 */
847 ndev->get_wireless_stats = &prism54_get_wireless_stats;
848#endif /* WIRELESS_EXT > 16 */
849 842
850 /* save the start and end address of the PCI memory area */ 843 /* save the start and end address of the PCI memory area */
851 ndev->mem_start = (unsigned long) priv->device_base; 844 ndev->mem_start = (unsigned long) priv->device_base;
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index 32a1019f1b36..efbed4397951 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -100,9 +100,7 @@ typedef struct {
100 100
101 struct iw_spy_data spy_data; /* iwspy support */ 101 struct iw_spy_data spy_data; /* iwspy support */
102 102
103#if WIRELESS_EXT > 16
104 struct iw_public_data wireless_data; 103 struct iw_public_data wireless_data;
105#endif /* WIRELESS_EXT > 16 */
106 104
107 int monitor_type; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_PRISM */ 105 int monitor_type; /* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_PRISM */
108 106
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 5952e9960499..3b49efa37ee5 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -97,12 +97,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
97 /* lock the driver code */ 97 /* lock the driver code */
98 spin_lock_irqsave(&priv->slock, flags); 98 spin_lock_irqsave(&priv->slock, flags);
99 99
100 /* determine the amount of fragments needed to store the frame */
101
102 frame_size = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
103 if (init_wds)
104 frame_size += 6;
105
106 /* check whether the destination queue has enough fragments for the frame */ 100 /* check whether the destination queue has enough fragments for the frame */
107 curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]); 101 curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
108 if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) { 102 if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
@@ -213,6 +207,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
213 /* store the skb address for future freeing */ 207 /* store the skb address for future freeing */
214 priv->data_low_tx[index] = skb; 208 priv->data_low_tx[index] = skb;
215 /* set the proper fragment start address and size information */ 209 /* set the proper fragment start address and size information */
210 frame_size = skb->len;
216 fragment->size = cpu_to_le16(frame_size); 211 fragment->size = cpu_to_le16(frame_size);
217 fragment->flags = cpu_to_le16(0); /* set to 1 if more fragments */ 212 fragment->flags = cpu_to_le16(0); /* set to 1 if more fragments */
218 fragment->address = cpu_to_le32(pci_map_address); 213 fragment->address = cpu_to_le32(pci_map_address);
@@ -246,12 +241,10 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
246 return 0; 241 return 0;
247 242
248 drop_free: 243 drop_free:
249 /* free the skbuf structure before aborting */
250 dev_kfree_skb(skb);
251 skb = NULL;
252
253 priv->statistics.tx_dropped++; 244 priv->statistics.tx_dropped++;
254 spin_unlock_irqrestore(&priv->slock, flags); 245 spin_unlock_irqrestore(&priv->slock, flags);
246 dev_kfree_skb(skb);
247 skb = NULL;
255 return err; 248 return err;
256} 249}
257 250
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index b6f2e5a223be..6a60c5970cb5 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -137,7 +137,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
137 PCI_DMA_FROMDEVICE); 137 PCI_DMA_FROMDEVICE);
138 if (!buf->pci_addr) { 138 if (!buf->pci_addr) {
139 printk(KERN_WARNING 139 printk(KERN_WARNING
140 "Failed to make memory DMA'able\n."); 140 "Failed to make memory DMA'able.\n");
141 return -ENOMEM; 141 return -ENOMEM;
142 } 142 }
143 } 143 }
@@ -455,7 +455,7 @@ islpci_mgt_transaction(struct net_device *ndev,
455 struct islpci_mgmtframe **recvframe) 455 struct islpci_mgmtframe **recvframe)
456{ 456{
457 islpci_private *priv = netdev_priv(ndev); 457 islpci_private *priv = netdev_priv(ndev);
458 const long wait_cycle_jiffies = (ISL38XX_WAIT_CYCLE * 10 * HZ) / 1000; 458 const long wait_cycle_jiffies = msecs_to_jiffies(ISL38XX_WAIT_CYCLE * 10);
459 long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies; 459 long timeout_left = ISL38XX_MAX_WAIT_CYCLES * wait_cycle_jiffies;
460 int err; 460 int err;
461 DEFINE_WAIT(wait); 461 DEFINE_WAIT(wait);
@@ -475,8 +475,7 @@ islpci_mgt_transaction(struct net_device *ndev,
475 int timeleft; 475 int timeleft;
476 struct islpci_mgmtframe *frame; 476 struct islpci_mgmtframe *frame;
477 477
478 set_current_state(TASK_UNINTERRUPTIBLE); 478 timeleft = schedule_timeout_uninterruptible(wait_cycle_jiffies);
479 timeleft = schedule_timeout(wait_cycle_jiffies);
480 frame = xchg(&priv->mgmt_received, NULL); 479 frame = xchg(&priv->mgmt_received, NULL);
481 if (frame) { 480 if (frame) {
482 if (frame->header->oid == oid) { 481 if (frame->header->oid == oid) {
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 12123e24b113..eea2f04c8c6d 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -268,11 +268,10 @@ mgt_clean(islpci_private *priv)
268 268
269 if (!priv->mib) 269 if (!priv->mib)
270 return; 270 return;
271 for (i = 0; i < OID_NUM_LAST; i++) 271 for (i = 0; i < OID_NUM_LAST; i++) {
272 if (priv->mib[i]) { 272 kfree(priv->mib[i]);
273 kfree(priv->mib[i]); 273 priv->mib[i] = NULL;
274 priv->mib[i] = NULL; 274 }
275 }
276 kfree(priv->mib); 275 kfree(priv->mib);
277 priv->mib = NULL; 276 priv->mib = NULL;
278} 277}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index e9c5ea0f5535..70fd6fd8feb9 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1649,28 +1649,28 @@ static iw_stats * ray_get_wireless_stats(struct net_device * dev)
1649 */ 1649 */
1650 1650
1651static const iw_handler ray_handler[] = { 1651static const iw_handler ray_handler[] = {
1652 [SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) ray_commit, 1652 [SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) ray_commit,
1653 [SIOCGIWNAME -SIOCIWFIRST] (iw_handler) ray_get_name, 1653 [SIOCGIWNAME -SIOCIWFIRST] = (iw_handler) ray_get_name,
1654 [SIOCSIWFREQ -SIOCIWFIRST] (iw_handler) ray_set_freq, 1654 [SIOCSIWFREQ -SIOCIWFIRST] = (iw_handler) ray_set_freq,
1655 [SIOCGIWFREQ -SIOCIWFIRST] (iw_handler) ray_get_freq, 1655 [SIOCGIWFREQ -SIOCIWFIRST] = (iw_handler) ray_get_freq,
1656 [SIOCSIWMODE -SIOCIWFIRST] (iw_handler) ray_set_mode, 1656 [SIOCSIWMODE -SIOCIWFIRST] = (iw_handler) ray_set_mode,
1657 [SIOCGIWMODE -SIOCIWFIRST] (iw_handler) ray_get_mode, 1657 [SIOCGIWMODE -SIOCIWFIRST] = (iw_handler) ray_get_mode,
1658 [SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) ray_get_range, 1658 [SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) ray_get_range,
1659#ifdef WIRELESS_SPY 1659#ifdef WIRELESS_SPY
1660 [SIOCSIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_set_spy, 1660 [SIOCSIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
1661 [SIOCGIWSPY -SIOCIWFIRST] (iw_handler) iw_handler_get_spy, 1661 [SIOCGIWSPY -SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
1662 [SIOCSIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_set_thrspy, 1662 [SIOCSIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
1663 [SIOCGIWTHRSPY-SIOCIWFIRST] (iw_handler) iw_handler_get_thrspy, 1663 [SIOCGIWTHRSPY-SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
1664#endif /* WIRELESS_SPY */ 1664#endif /* WIRELESS_SPY */
1665 [SIOCGIWAP -SIOCIWFIRST] (iw_handler) ray_get_wap, 1665 [SIOCGIWAP -SIOCIWFIRST] = (iw_handler) ray_get_wap,
1666 [SIOCSIWESSID -SIOCIWFIRST] (iw_handler) ray_set_essid, 1666 [SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) ray_set_essid,
1667 [SIOCGIWESSID -SIOCIWFIRST] (iw_handler) ray_get_essid, 1667 [SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) ray_get_essid,
1668 [SIOCSIWRATE -SIOCIWFIRST] (iw_handler) ray_set_rate, 1668 [SIOCSIWRATE -SIOCIWFIRST] = (iw_handler) ray_set_rate,
1669 [SIOCGIWRATE -SIOCIWFIRST] (iw_handler) ray_get_rate, 1669 [SIOCGIWRATE -SIOCIWFIRST] = (iw_handler) ray_get_rate,
1670 [SIOCSIWRTS -SIOCIWFIRST] (iw_handler) ray_set_rts, 1670 [SIOCSIWRTS -SIOCIWFIRST] = (iw_handler) ray_set_rts,
1671 [SIOCGIWRTS -SIOCIWFIRST] (iw_handler) ray_get_rts, 1671 [SIOCGIWRTS -SIOCIWFIRST] = (iw_handler) ray_get_rts,
1672 [SIOCSIWFRAG -SIOCIWFIRST] (iw_handler) ray_set_frag, 1672 [SIOCSIWFRAG -SIOCIWFIRST] = (iw_handler) ray_set_frag,
1673 [SIOCGIWFRAG -SIOCIWFIRST] (iw_handler) ray_get_frag, 1673 [SIOCGIWFRAG -SIOCIWFIRST] = (iw_handler) ray_get_frag,
1674}; 1674};
1675 1675
1676#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */ 1676#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
@@ -1678,9 +1678,9 @@ static const iw_handler ray_handler[] = {
1678#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */ 1678#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
1679 1679
1680static const iw_handler ray_private_handler[] = { 1680static const iw_handler ray_private_handler[] = {
1681 [0] (iw_handler) ray_set_framing, 1681 [0] = (iw_handler) ray_set_framing,
1682 [1] (iw_handler) ray_get_framing, 1682 [1] = (iw_handler) ray_get_framing,
1683 [3] (iw_handler) ray_get_country, 1683 [3] = (iw_handler) ray_get_country,
1684}; 1684};
1685 1685
1686static const struct iw_priv_args ray_private_args[] = { 1686static const struct iw_priv_args ray_private_args[] = {
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 39c6cdf7f3f7..b1bbc8e8e91f 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -22,58 +22,23 @@
22#define PFX DRIVER_NAME ": " 22#define PFX DRIVER_NAME ": "
23 23
24#include <linux/config.h> 24#include <linux/config.h>
25#ifdef __IN_PCMCIA_PACKAGE__
26#include <pcmcia/k_compat.h>
27#endif /* __IN_PCMCIA_PACKAGE__ */
28
29#include <linux/module.h> 25#include <linux/module.h>
30#include <linux/kernel.h> 26#include <linux/kernel.h>
31#include <linux/init.h> 27#include <linux/init.h>
32#include <linux/sched.h> 28#include <linux/delay.h>
33#include <linux/ptrace.h> 29#include <linux/firmware.h>
34#include <linux/slab.h>
35#include <linux/string.h>
36#include <linux/ioport.h>
37#include <linux/netdevice.h>
38#include <linux/if_arp.h>
39#include <linux/etherdevice.h>
40#include <linux/wireless.h>
41
42#include <pcmcia/cs_types.h> 30#include <pcmcia/cs_types.h>
43#include <pcmcia/cs.h> 31#include <pcmcia/cs.h>
44#include <pcmcia/cistpl.h> 32#include <pcmcia/cistpl.h>
45#include <pcmcia/cisreg.h> 33#include <pcmcia/cisreg.h>
46#include <pcmcia/ds.h> 34#include <pcmcia/ds.h>
47 35
48#include <asm/uaccess.h>
49#include <asm/io.h>
50#include <asm/system.h>
51
52#include "orinoco.h" 36#include "orinoco.h"
53 37
54/*
55 * If SPECTRUM_FW_INCLUDED is defined, the firmware is hardcoded into
56 * the driver. Use get_symbol_fw script to generate spectrum_fw.h and
57 * copy it to the same directory as spectrum_cs.c.
58 *
59 * If SPECTRUM_FW_INCLUDED is not defined, the firmware is loaded at the
60 * runtime using hotplug. Use the same get_symbol_fw script to generate
61 * files symbol_sp24t_prim_fw symbol_sp24t_sec_fw, copy them to the
62 * hotplug firmware directory (typically /usr/lib/hotplug/firmware) and
63 * make sure that you have hotplug installed and enabled in the kernel.
64 */
65/* #define SPECTRUM_FW_INCLUDED 1 */
66
67#ifdef SPECTRUM_FW_INCLUDED
68/* Header with the firmware */
69#include "spectrum_fw.h"
70#else /* !SPECTRUM_FW_INCLUDED */
71#include <linux/firmware.h>
72static unsigned char *primsym; 38static unsigned char *primsym;
73static unsigned char *secsym; 39static unsigned char *secsym;
74static const char primary_fw_name[] = "symbol_sp24t_prim_fw"; 40static const char primary_fw_name[] = "symbol_sp24t_prim_fw";
75static const char secondary_fw_name[] = "symbol_sp24t_sec_fw"; 41static const char secondary_fw_name[] = "symbol_sp24t_sec_fw";
76#endif /* !SPECTRUM_FW_INCLUDED */
77 42
78/********************************************************************/ 43/********************************************************************/
79/* Module stuff */ 44/* Module stuff */
@@ -124,17 +89,8 @@ static dev_link_t *dev_list; /* = NULL */
124/* Function prototypes */ 89/* Function prototypes */
125/********************************************************************/ 90/********************************************************************/
126 91
127/* device methods */ 92static void spectrum_cs_release(dev_link_t *link);
128static int spectrum_cs_hard_reset(struct orinoco_private *priv); 93static void spectrum_cs_detach(dev_link_t *link);
129
130/* PCMCIA gumpf */
131static void spectrum_cs_config(dev_link_t * link);
132static void spectrum_cs_release(dev_link_t * link);
133static int spectrum_cs_event(event_t event, int priority,
134 event_callback_args_t * args);
135
136static dev_link_t *spectrum_cs_attach(void);
137static void spectrum_cs_detach(dev_link_t *);
138 94
139/********************************************************************/ 95/********************************************************************/
140/* Firmware downloader */ 96/* Firmware downloader */
@@ -182,8 +138,8 @@ static void spectrum_cs_detach(dev_link_t *);
182 * Each block has the following structure. 138 * Each block has the following structure.
183 */ 139 */
184struct dblock { 140struct dblock {
185 u32 _addr; /* adapter address where to write the block */ 141 __le32 _addr; /* adapter address where to write the block */
186 u16 _len; /* length of the data only, in bytes */ 142 __le16 _len; /* length of the data only, in bytes */
187 char data[0]; /* data to be written */ 143 char data[0]; /* data to be written */
188} __attribute__ ((packed)); 144} __attribute__ ((packed));
189 145
@@ -193,9 +149,9 @@ struct dblock {
193 * items with matching ID should be written. 149 * items with matching ID should be written.
194 */ 150 */
195struct pdr { 151struct pdr {
196 u32 _id; /* record ID */ 152 __le32 _id; /* record ID */
197 u32 _addr; /* adapter address where to write the data */ 153 __le32 _addr; /* adapter address where to write the data */
198 u32 _len; /* expected length of the data, in bytes */ 154 __le32 _len; /* expected length of the data, in bytes */
199 char next[0]; /* next PDR starts here */ 155 char next[0]; /* next PDR starts here */
200} __attribute__ ((packed)); 156} __attribute__ ((packed));
201 157
@@ -206,8 +162,8 @@ struct pdr {
206 * be plugged into the secondary firmware. 162 * be plugged into the secondary firmware.
207 */ 163 */
208struct pdi { 164struct pdi {
209 u16 _len; /* length of ID and data, in words */ 165 __le16 _len; /* length of ID and data, in words */
210 u16 _id; /* record ID */ 166 __le16 _id; /* record ID */
211 char data[0]; /* plug data */ 167 char data[0]; /* plug data */
212} __attribute__ ((packed));; 168} __attribute__ ((packed));;
213 169
@@ -414,7 +370,7 @@ spectrum_plug_pdi(hermes_t *hw, struct pdr *first_pdr, struct pdi *pdi)
414 370
415/* Read PDA from the adapter */ 371/* Read PDA from the adapter */
416static int 372static int
417spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len) 373spectrum_read_pda(hermes_t *hw, __le16 *pda, int pda_len)
418{ 374{
419 int ret; 375 int ret;
420 int pda_size; 376 int pda_size;
@@ -445,7 +401,7 @@ spectrum_read_pda(hermes_t *hw, u16 *pda, int pda_len)
445/* Parse PDA and write the records into the adapter */ 401/* Parse PDA and write the records into the adapter */
446static int 402static int
447spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block, 403spectrum_apply_pda(hermes_t *hw, const struct dblock *first_block,
448 u16 *pda) 404 __le16 *pda)
449{ 405{
450 int ret; 406 int ret;
451 struct pdi *pdi; 407 struct pdi *pdi;
@@ -511,7 +467,7 @@ spectrum_dl_image(hermes_t *hw, dev_link_t *link,
511 const struct dblock *first_block; 467 const struct dblock *first_block;
512 468
513 /* Plug Data Area (PDA) */ 469 /* Plug Data Area (PDA) */
514 u16 pda[PDA_WORDS]; 470 __le16 pda[PDA_WORDS];
515 471
516 /* Binary block begins after the 0x1A marker */ 472 /* Binary block begins after the 0x1A marker */
517 ptr = image; 473 ptr = image;
@@ -571,8 +527,6 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
571{ 527{
572 int ret; 528 int ret;
573 client_handle_t handle = link->handle; 529 client_handle_t handle = link->handle;
574
575#ifndef SPECTRUM_FW_INCLUDED
576 const struct firmware *fw_entry; 530 const struct firmware *fw_entry;
577 531
578 if (request_firmware(&fw_entry, primary_fw_name, 532 if (request_firmware(&fw_entry, primary_fw_name,
@@ -592,7 +546,6 @@ spectrum_dl_firmware(hermes_t *hw, dev_link_t *link)
592 secondary_fw_name); 546 secondary_fw_name);
593 return -ENOENT; 547 return -ENOENT;
594 } 548 }
595#endif
596 549
597 /* Load primary firmware */ 550 /* Load primary firmware */
598 ret = spectrum_dl_image(hw, link, primsym); 551 ret = spectrum_dl_image(hw, link, primsym);
@@ -1085,7 +1038,7 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
1085static struct pcmcia_device_id spectrum_cs_ids[] = { 1038static struct pcmcia_device_id spectrum_cs_ids[] = {
1086 PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */ 1039 PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4100 */
1087 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */ 1040 PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */
1088 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0001), /* Intel PRO/Wireless 2011B */ 1041 PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */
1089 PCMCIA_DEVICE_NULL, 1042 PCMCIA_DEVICE_NULL,
1090}; 1043};
1091MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids); 1044MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids);
@@ -1096,8 +1049,8 @@ static struct pcmcia_driver orinoco_driver = {
1096 .name = DRIVER_NAME, 1049 .name = DRIVER_NAME,
1097 }, 1050 },
1098 .attach = spectrum_cs_attach, 1051 .attach = spectrum_cs_attach,
1099 .event = spectrum_cs_event,
1100 .detach = spectrum_cs_detach, 1052 .detach = spectrum_cs_detach,
1053 .event = spectrum_cs_event,
1101 .id_table = spectrum_cs_ids, 1054 .id_table = spectrum_cs_ids,
1102}; 1055};
1103 1056
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 7bc7fc823128..d25264ba0c0e 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -860,12 +860,9 @@ static int allocate_buffers(struct strip *strip_info, int mtu)
860 strip_info->mtu = dev->mtu = mtu; 860 strip_info->mtu = dev->mtu = mtu;
861 return (1); 861 return (1);
862 } 862 }
863 if (r) 863 kfree(r);
864 kfree(r); 864 kfree(s);
865 if (s) 865 kfree(t);
866 kfree(s);
867 if (t)
868 kfree(t);
869 return (0); 866 return (0);
870} 867}
871 868
@@ -922,13 +919,9 @@ static int strip_change_mtu(struct net_device *dev, int new_mtu)
922 printk(KERN_NOTICE "%s: strip MTU changed fom %d to %d.\n", 919 printk(KERN_NOTICE "%s: strip MTU changed fom %d to %d.\n",
923 strip_info->dev->name, old_mtu, strip_info->mtu); 920 strip_info->dev->name, old_mtu, strip_info->mtu);
924 921
925 if (orbuff) 922 kfree(orbuff);
926 kfree(orbuff); 923 kfree(osbuff);
927 if (osbuff) 924 kfree(otbuff);
928 kfree(osbuff);
929 if (otbuff)
930 kfree(otbuff);
931
932 return 0; 925 return 0;
933} 926}
934 927
@@ -2498,18 +2491,13 @@ static int strip_close_low(struct net_device *dev)
2498 /* 2491 /*
2499 * Free all STRIP frame buffers. 2492 * Free all STRIP frame buffers.
2500 */ 2493 */
2501 if (strip_info->rx_buff) { 2494 kfree(strip_info->rx_buff);
2502 kfree(strip_info->rx_buff); 2495 strip_info->rx_buff = NULL;
2503 strip_info->rx_buff = NULL; 2496 kfree(strip_info->sx_buff);
2504 } 2497 strip_info->sx_buff = NULL;
2505 if (strip_info->sx_buff) { 2498 kfree(strip_info->tx_buff);
2506 kfree(strip_info->sx_buff); 2499 strip_info->tx_buff = NULL;
2507 strip_info->sx_buff = NULL; 2500
2508 }
2509 if (strip_info->tx_buff) {
2510 kfree(strip_info->tx_buff);
2511 strip_info->tx_buff = NULL;
2512 }
2513 del_timer(&strip_info->idle_timer); 2501 del_timer(&strip_info->idle_timer);
2514 return 0; 2502 return 0;
2515} 2503}
diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c
index 7a5e20a17890..b0d8b5b03152 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/net/wireless/wavelan.c
@@ -430,7 +430,6 @@ static void fee_read(unsigned long ioaddr, /* I/O port of the card */
430 } 430 }
431} 431}
432 432
433#ifdef WIRELESS_EXT /* if the wireless extension exists in the kernel */
434 433
435/*------------------------------------------------------------------*/ 434/*------------------------------------------------------------------*/
436/* 435/*
@@ -514,7 +513,6 @@ static void fee_write(unsigned long ioaddr, /* I/O port of the card */
514 fee_wait(ioaddr, 10, 100); 513 fee_wait(ioaddr, 10, 100);
515#endif /* EEPROM_IS_PROTECTED */ 514#endif /* EEPROM_IS_PROTECTED */
516} 515}
517#endif /* WIRELESS_EXT */
518 516
519/************************ I82586 SUBROUTINES *************************/ 517/************************ I82586 SUBROUTINES *************************/
520/* 518/*
@@ -973,11 +971,9 @@ static void wv_mmc_show(struct net_device * dev)
973 mmc_read(ioaddr, 0, (u8 *) & m, sizeof(m)); 971 mmc_read(ioaddr, 0, (u8 *) & m, sizeof(m));
974 mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0); 972 mmc_out(ioaddr, mmwoff(0, mmw_freeze), 0);
975 973
976#ifdef WIRELESS_EXT /* if wireless extension exists in the kernel */
977 /* Don't forget to update statistics */ 974 /* Don't forget to update statistics */
978 lp->wstats.discard.nwid += 975 lp->wstats.discard.nwid +=
979 (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l; 976 (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
980#endif /* WIRELESS_EXT */
981 977
982 printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n"); 978 printk(KERN_DEBUG "##### WaveLAN modem status registers: #####\n");
983#ifdef DEBUG_SHOW_UNUSED 979#ifdef DEBUG_SHOW_UNUSED
@@ -1499,7 +1495,6 @@ static int wavelan_set_mac_address(struct net_device * dev, void *addr)
1499} 1495}
1500#endif /* SET_MAC_ADDRESS */ 1496#endif /* SET_MAC_ADDRESS */
1501 1497
1502#ifdef WIRELESS_EXT /* if wireless extensions exist in the kernel */
1503 1498
1504/*------------------------------------------------------------------*/ 1499/*------------------------------------------------------------------*/
1505/* 1500/*
@@ -2473,7 +2468,6 @@ static iw_stats *wavelan_get_wireless_stats(struct net_device * dev)
2473#endif 2468#endif
2474 return &lp->wstats; 2469 return &lp->wstats;
2475} 2470}
2476#endif /* WIRELESS_EXT */
2477 2471
2478/************************* PACKET RECEPTION *************************/ 2472/************************* PACKET RECEPTION *************************/
2479/* 2473/*
@@ -4194,11 +4188,9 @@ static int __init wavelan_config(struct net_device *dev, unsigned short ioaddr)
4194 dev->set_mac_address = &wavelan_set_mac_address; 4188 dev->set_mac_address = &wavelan_set_mac_address;
4195#endif /* SET_MAC_ADDRESS */ 4189#endif /* SET_MAC_ADDRESS */
4196 4190
4197#ifdef WIRELESS_EXT /* if wireless extension exists in the kernel */
4198 dev->wireless_handlers = &wavelan_handler_def; 4191 dev->wireless_handlers = &wavelan_handler_def;
4199 lp->wireless_data.spy_data = &lp->spy_data; 4192 lp->wireless_data.spy_data = &lp->spy_data;
4200 dev->wireless_data = &lp->wireless_data; 4193 dev->wireless_data = &lp->wireless_data;
4201#endif
4202 4194
4203 dev->mtu = WAVELAN_MTU; 4195 dev->mtu = WAVELAN_MTU;
4204 4196
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/net/wireless/wavelan.p.h
index 509ff22a6caa..166e28b9a4f7 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/net/wireless/wavelan.p.h
@@ -409,11 +409,9 @@
409#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical). */ 409#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical). */
410#undef SET_MAC_ADDRESS /* Experimental */ 410#undef SET_MAC_ADDRESS /* Experimental */
411 411
412#ifdef WIRELESS_EXT /* If wireless extensions exist in the kernel */
413/* Warning: this stuff will slow down the driver. */ 412/* Warning: this stuff will slow down the driver. */
414#define WIRELESS_SPY /* Enable spying addresses. */ 413#define WIRELESS_SPY /* Enable spying addresses. */
415#undef HISTOGRAM /* Enable histogram of signal level. */ 414#undef HISTOGRAM /* Enable histogram of signal level. */
416#endif
417 415
418/****************************** DEBUG ******************************/ 416/****************************** DEBUG ******************************/
419 417
@@ -506,12 +504,10 @@ struct net_local
506 u_short tx_first_free; 504 u_short tx_first_free;
507 u_short tx_first_in_use; 505 u_short tx_first_in_use;
508 506
509#ifdef WIRELESS_EXT
510 iw_stats wstats; /* Wireless-specific statistics */ 507 iw_stats wstats; /* Wireless-specific statistics */
511 508
512 struct iw_spy_data spy_data; 509 struct iw_spy_data spy_data;
513 struct iw_public_data wireless_data; 510 struct iw_public_data wireless_data;
514#endif
515 511
516#ifdef HISTOGRAM 512#ifdef HISTOGRAM
517 int his_number; /* number of intervals */ 513 int his_number; /* number of intervals */
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 183c4732ef65..4b3c98f5c564 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -415,7 +415,6 @@ fee_read(u_long base, /* i/o port of the card */
415 } 415 }
416} 416}
417 417
418#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
419 418
420/*------------------------------------------------------------------*/ 419/*------------------------------------------------------------------*/
421/* 420/*
@@ -500,7 +499,6 @@ fee_write(u_long base, /* i/o port of the card */
500 fee_wait(base, 10, 100); 499 fee_wait(base, 10, 100);
501#endif /* EEPROM_IS_PROTECTED */ 500#endif /* EEPROM_IS_PROTECTED */
502} 501}
503#endif /* WIRELESS_EXT */
504 502
505/******************* WaveLAN Roaming routines... ********************/ 503/******************* WaveLAN Roaming routines... ********************/
506 504
@@ -1161,10 +1159,8 @@ wv_mmc_show(struct net_device * dev)
1161 mmc_read(base, 0, (u_char *)&m, sizeof(m)); 1159 mmc_read(base, 0, (u_char *)&m, sizeof(m));
1162 mmc_out(base, mmwoff(0, mmw_freeze), 0); 1160 mmc_out(base, mmwoff(0, mmw_freeze), 0);
1163 1161
1164#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
1165 /* Don't forget to update statistics */ 1162 /* Don't forget to update statistics */
1166 lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l; 1163 lp->wstats.discard.nwid += (m.mmr_wrong_nwid_h << 8) | m.mmr_wrong_nwid_l;
1167#endif /* WIRELESS_EXT */
1168 1164
1169 spin_unlock_irqrestore(&lp->spinlock, flags); 1165 spin_unlock_irqrestore(&lp->spinlock, flags);
1170 1166
@@ -1550,7 +1546,6 @@ wavelan_set_mac_address(struct net_device * dev,
1550} 1546}
1551#endif /* SET_MAC_ADDRESS */ 1547#endif /* SET_MAC_ADDRESS */
1552 1548
1553#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
1554 1549
1555/*------------------------------------------------------------------*/ 1550/*------------------------------------------------------------------*/
1556/* 1551/*
@@ -2793,7 +2788,6 @@ wavelan_get_wireless_stats(struct net_device * dev)
2793#endif 2788#endif
2794 return &lp->wstats; 2789 return &lp->wstats;
2795} 2790}
2796#endif /* WIRELESS_EXT */
2797 2791
2798/************************* PACKET RECEPTION *************************/ 2792/************************* PACKET RECEPTION *************************/
2799/* 2793/*
@@ -4679,11 +4673,9 @@ wavelan_attach(void)
4679 dev->watchdog_timeo = WATCHDOG_JIFFIES; 4673 dev->watchdog_timeo = WATCHDOG_JIFFIES;
4680 SET_ETHTOOL_OPS(dev, &ops); 4674 SET_ETHTOOL_OPS(dev, &ops);
4681 4675
4682#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
4683 dev->wireless_handlers = &wavelan_handler_def; 4676 dev->wireless_handlers = &wavelan_handler_def;
4684 lp->wireless_data.spy_data = &lp->spy_data; 4677 lp->wireless_data.spy_data = &lp->spy_data;
4685 dev->wireless_data = &lp->wireless_data; 4678 dev->wireless_data = &lp->wireless_data;
4686#endif
4687 4679
4688 /* Other specific data */ 4680 /* Other specific data */
4689 dev->mtu = WAVELAN_MTU; 4681 dev->mtu = WAVELAN_MTU;
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 01d882be8790..724a715089c9 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -472,11 +472,9 @@
472#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */ 472#define MULTICAST_AVOID /* Avoid extra multicast (I'm sceptical) */
473#undef SET_MAC_ADDRESS /* Experimental */ 473#undef SET_MAC_ADDRESS /* Experimental */
474 474
475#ifdef WIRELESS_EXT /* If wireless extension exist in the kernel */
476/* Warning : these stuff will slow down the driver... */ 475/* Warning : these stuff will slow down the driver... */
477#define WIRELESS_SPY /* Enable spying addresses */ 476#define WIRELESS_SPY /* Enable spying addresses */
478#undef HISTOGRAM /* Enable histogram of sig level... */ 477#undef HISTOGRAM /* Enable histogram of sig level... */
479#endif
480 478
481/****************************** DEBUG ******************************/ 479/****************************** DEBUG ******************************/
482 480
@@ -624,12 +622,10 @@ struct net_local
624 int rfp; /* Last DMA machine receive pointer */ 622 int rfp; /* Last DMA machine receive pointer */
625 int overrunning; /* Receiver overrun flag */ 623 int overrunning; /* Receiver overrun flag */
626 624
627#ifdef WIRELESS_EXT
628 iw_stats wstats; /* Wireless specific stats */ 625 iw_stats wstats; /* Wireless specific stats */
629 626
630 struct iw_spy_data spy_data; 627 struct iw_spy_data spy_data;
631 struct iw_public_data wireless_data; 628 struct iw_public_data wireless_data;
632#endif
633 629
634#ifdef HISTOGRAM 630#ifdef HISTOGRAM
635 int his_number; /* Number of intervals */ 631 int his_number; /* Number of intervals */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 7fcbe589c3f2..4303c50c2ab6 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -548,7 +548,7 @@ struct wl3501_80211_tx_plcp_hdr {
548 548
549struct wl3501_80211_tx_hdr { 549struct wl3501_80211_tx_hdr {
550 struct wl3501_80211_tx_plcp_hdr pclp_hdr; 550 struct wl3501_80211_tx_plcp_hdr pclp_hdr;
551 struct ieee80211_hdr mac_hdr; 551 struct ieee80211_hdr_4addr mac_hdr;
552} __attribute__ ((packed)); 552} __attribute__ ((packed));
553 553
554/* 554/*