Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c501.c | 1
-rw-r--r--  drivers/net/3c515.c | 3
-rw-r--r--  drivers/net/3c59x.c | 4
-rw-r--r--  drivers/net/8139cp.c | 120
-rw-r--r--  drivers/net/8139too.c | 7
-rw-r--r--  drivers/net/82596.c | 9
-rw-r--r--  drivers/net/8390.c | 10
-rw-r--r--  drivers/net/Kconfig | 76
-rw-r--r--  drivers/net/Makefile | 14
-rw-r--r--  drivers/net/ac3200.c | 3
-rw-r--r--  drivers/net/acenic.c | 2
-rw-r--r--  drivers/net/amd8111e.c | 7
-rw-r--r--  drivers/net/appletalk/Kconfig | 2
-rw-r--r--  drivers/net/appletalk/cops.c | 2
-rw-r--r--  drivers/net/arcnet/com20020-pci.c | 2
-rw-r--r--  drivers/net/ariadne.c | 2
-rw-r--r--  drivers/net/at1700.c | 6
-rw-r--r--  drivers/net/atarilance.c | 2
-rw-r--r--  drivers/net/au1000_eth.c | 3
-rw-r--r--  drivers/net/b44.c | 2
-rw-r--r--  drivers/net/bnx2.c | 51
-rw-r--r--  drivers/net/bnx2.h | 12
-rw-r--r--  drivers/net/cassini.c | 2
-rw-r--r--  drivers/net/chelsio/cxgb2.c | 2
-rw-r--r--  drivers/net/cs89x0.c | 3
-rw-r--r--  drivers/net/defxx.c | 2
-rw-r--r--  drivers/net/dl2k.c | 2
-rw-r--r--  drivers/net/dm9000.c | 18
-rw-r--r--  drivers/net/e100.c | 36
-rw-r--r--  drivers/net/e1000/e1000.h | 6
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 257
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1166
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 58
-rw-r--r--  drivers/net/e1000/e1000_main.c | 173
-rw-r--r--  drivers/net/e1000/e1000_param.c | 161
-rw-r--r--  drivers/net/e2100.c | 4
-rw-r--r--  drivers/net/eepro.c | 6
-rw-r--r--  drivers/net/eepro100.c | 2
-rw-r--r--  drivers/net/eexpress.c | 2
-rw-r--r--  drivers/net/epic100.c | 7
-rw-r--r--  drivers/net/es3210.c | 3
-rw-r--r--  drivers/net/eth16i.c | 2
-rw-r--r--  drivers/net/fealnx.c | 10
-rw-r--r--  drivers/net/fec.c | 2
-rw-r--r--  drivers/net/forcedeth.c | 560
-rw-r--r--  drivers/net/fs_enet/Makefile | 6
-rw-r--r--  drivers/net/fs_enet/fec.h | 42
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 207
-rw-r--r--  drivers/net/fs_enet/fs_enet-mii.c | 505
-rw-r--r--  drivers/net/fs_enet/fs_enet.h | 40
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 32
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 142
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 448
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 243
-rw-r--r--  drivers/net/fs_enet/mii-fixed.c | 91
-rw-r--r--  drivers/net/gianfar.c | 3
-rw-r--r--  drivers/net/hamachi.c | 6
-rw-r--r--  drivers/net/hp100.c | 1
-rw-r--r--  drivers/net/ioc3-eth.c | 4
-rw-r--r--  drivers/net/irda/mcs7780.c | 1
-rw-r--r--  drivers/net/irda/w83977af_ir.c | 1
-rw-r--r--  drivers/net/ixgb/ixgb.h | 5
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 6
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c | 11
-rw-r--r--  drivers/net/ixgb/ixgb_ids.h | 1
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 152
-rw-r--r--  drivers/net/lance.c | 6
-rw-r--r--  drivers/net/lne390.c | 2
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 256
-rw-r--r--  drivers/net/myri10ge/myri10ge_mcp.h | 47
-rw-r--r--  drivers/net/natsemi.c | 9
-rw-r--r--  drivers/net/ne2k-pci.c | 2
-rw-r--r--  drivers/net/netx-eth.c | 1
-rw-r--r--  drivers/net/ni52.c | 2
-rw-r--r--  drivers/net/ni65.c | 2
-rw-r--r--  drivers/net/ns83820.c | 2
-rw-r--r--  drivers/net/pci-skeleton.c | 7
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 3
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 4
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 15
-rw-r--r--  drivers/net/pcmcia/smc91c92_cs.c | 5
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 18
-rw-r--r--  drivers/net/pcnet32.c | 27
-rw-r--r--  drivers/net/phy/Kconfig | 17
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/fixed.c | 358
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/phy.c | 8
-rw-r--r--  drivers/net/phy/phy_device.c | 51
-rw-r--r--  drivers/net/phy/smsc.c | 1
-rw-r--r--  drivers/net/phy/vitesse.c | 1
-rw-r--r--  drivers/net/ppp_generic.c | 30
-rw-r--r--  drivers/net/qla3xxx.c | 3537
-rw-r--r--  drivers/net/qla3xxx.h | 1194
-rw-r--r--  drivers/net/r8169.c | 2
-rw-r--r--  drivers/net/rrunner.c | 2
-rw-r--r--  drivers/net/s2io.c | 389
-rw-r--r--  drivers/net/s2io.h | 10
-rw-r--r--  drivers/net/saa9730.c | 2
-rw-r--r--  drivers/net/sb1250-mac.c | 9
-rw-r--r--  drivers/net/seeq8005.c | 2
-rw-r--r--  drivers/net/sis190.c | 5
-rw-r--r--  drivers/net/sis900.c | 3
-rw-r--r--  drivers/net/sk98lin/skge.c | 2
-rw-r--r--  drivers/net/skfp/skfddi.c | 2
-rw-r--r--  drivers/net/skge.c | 211
-rw-r--r--  drivers/net/skge.h | 1
-rw-r--r--  drivers/net/sky2.c | 189
-rw-r--r--  drivers/net/sky2.h | 3
-rw-r--r--  drivers/net/slhc.c | 28
-rw-r--r--  drivers/net/smc911x.c | 3
-rw-r--r--  drivers/net/smc91x.c | 8
-rw-r--r--  drivers/net/smc91x.h | 29
-rw-r--r--  drivers/net/spider_net.c | 12
-rw-r--r--  drivers/net/spider_net.h | 3
-rw-r--r--  drivers/net/spider_net_ethtool.c | 13
-rw-r--r--  drivers/net/starfire.c | 2
-rw-r--r--  drivers/net/sun3lance.c | 2
-rw-r--r--  drivers/net/sundance.c | 23
-rw-r--r--  drivers/net/sungem.c | 2
-rw-r--r--  drivers/net/sunlance.c | 35
-rw-r--r--  drivers/net/tc35815.c | 2
-rw-r--r--  drivers/net/tg3.c | 59
-rw-r--r--  drivers/net/tg3.h | 8
-rw-r--r--  drivers/net/tokenring/3c359.c | 2
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 4
-rw-r--r--  drivers/net/tokenring/lanstreamer.c | 2
-rw-r--r--  drivers/net/tokenring/smctr.c | 5
-rw-r--r--  drivers/net/tulip/21142.c | 6
-rw-r--r--  drivers/net/tulip/de2104x.c | 18
-rw-r--r--  drivers/net/tulip/de4x5.c | 2
-rw-r--r--  drivers/net/tulip/dmfe.c | 2
-rw-r--r--  drivers/net/tulip/eeprom.c | 2
-rw-r--r--  drivers/net/tulip/interrupt.c | 2
-rw-r--r--  drivers/net/tulip/media.c | 2
-rw-r--r--  drivers/net/tulip/pnic.c | 2
-rw-r--r--  drivers/net/tulip/pnic2.c | 2
-rw-r--r--  drivers/net/tulip/timer.c | 16
-rw-r--r--  drivers/net/tulip/tulip.h | 36
-rw-r--r--  drivers/net/tulip/tulip_core.c | 100
-rw-r--r--  drivers/net/tulip/uli526x.c | 12
-rw-r--r--  drivers/net/tulip/winbond-840.c | 90
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 3
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c | 2
-rw-r--r--  drivers/net/typhoon.c | 8
-rw-r--r--  drivers/net/ucc_geth.c | 4276
-rw-r--r--  drivers/net/ucc_geth.h | 1339
-rw-r--r--  drivers/net/ucc_geth_phy.c | 801
-rw-r--r--  drivers/net/ucc_geth_phy.h | 217
-rw-r--r--  drivers/net/via-rhine.c | 95
-rw-r--r--  drivers/net/via-velocity.c | 21
-rw-r--r--  drivers/net/via-velocity.h | 21
-rw-r--r--  drivers/net/wan/c101.c | 9
-rw-r--r--  drivers/net/wan/cycx_main.c | 1
-rw-r--r--  drivers/net/wan/dlci.c | 1
-rw-r--r--  drivers/net/wan/dscc4.c | 2
-rw-r--r--  drivers/net/wan/farsync.c | 2
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 2
-rw-r--r--  drivers/net/wan/pc300_drv.c | 2
-rw-r--r--  drivers/net/wan/pci200syn.c | 2
-rw-r--r--  drivers/net/wan/sdla.c | 1
-rw-r--r--  drivers/net/wan/wanxl.c | 2
-rw-r--r--  drivers/net/wd.c | 4
-rw-r--r--  drivers/net/wireless/Kconfig | 24
-rw-r--r--  drivers/net/wireless/airo.c | 52
-rw-r--r--  drivers/net/wireless/atmel_pci.c | 2
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h | 181
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c | 80
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h | 1
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 583
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.h | 296
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_leds.c | 10
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 902
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 6
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.c | 33
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_pio.c | 4
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c | 178
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 166
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_xmit.c | 5
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 1
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 3
-rw-r--r--  drivers/net/wireless/ipw2100.c | 7
-rw-r--r--  drivers/net/wireless/ipw2200.c | 246
-rw-r--r--  drivers/net/wireless/ipw2200.h | 51
-rw-r--r--  drivers/net/wireless/orinoco.c | 5
-rw-r--r--  drivers/net/wireless/orinoco.h | 8
-rw-r--r--  drivers/net/wireless/orinoco_nortel.c | 2
-rw-r--r--  drivers/net/wireless/orinoco_pci.c | 2
-rw-r--r--  drivers/net/wireless/orinoco_plx.c | 2
-rw-r--r--  drivers/net/wireless/orinoco_tmd.c | 2
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 597
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.h | 6
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 4
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.h | 2
-rw-r--r--  drivers/net/wireless/prism54/islpci_hotplug.c | 2
-rw-r--r--  drivers/net/wireless/ray_cs.c | 2
-rw-r--r--  drivers/net/wireless/spectrum_cs.c | 2
-rw-r--r--  drivers/net/wireless/strip.c | 6
-rw-r--r--  drivers/net/wireless/zd1201.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/Makefile | 1
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 127
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 25
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_def.h | 6
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.h | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 63
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h | 17
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_netdev.c | 17
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.c | 7
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.h | 1
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_al2230.c | 155
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_al7230b.c | 274
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 131
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.h | 15
-rw-r--r--  drivers/net/yellowfin.c | 8
215 files changed, 18381 insertions(+), 4502 deletions(-)
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 07136ec423bd..d7b115a35962 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -120,7 +120,6 @@ static const char version[] =
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/config.h>	/* for CONFIG_IP_MULTICAST */
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 4532b17e40ea..aedfddf20cb3 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1003,7 +1003,8 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
 	/* Calculate the next Tx descriptor entry. */
 	int entry = vp->cur_tx % TX_RING_SIZE;
 	struct boom_tx_desc *prev_entry;
-	unsigned long flags, i;
+	unsigned long flags;
+	int i;
 
 	if (vp->tx_full)	/* No room to transmit with */
 		return 1;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80e8ca013e44..415d08113e10 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2928,7 +2928,7 @@ static void set_rx_mode(struct net_device *dev)
 	int new_mode;
 
 	if (dev->flags & IFF_PROMISC) {
-		if (vortex_debug > 0)
+		if (vortex_debug > 3)
 			printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
 		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
 	} else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
@@ -3169,7 +3169,7 @@ static int __init vortex_init(void)
 {
 	int pci_rc, eisa_rc;
 
-	pci_rc = pci_module_init(&vortex_driver);
+	pci_rc = pci_register_driver(&vortex_driver);
 	eisa_rc = vortex_eisa_init();
 
 	if (pci_rc == 0)
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 1428bb7715af..488241c5bdd8 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -48,7 +48,7 @@
  */
 
 #define DRV_NAME		"8139cp"
-#define DRV_VERSION		"1.2"
+#define DRV_VERSION		"1.3"
 #define DRV_RELDATE		"Mar 22, 2004"
 
 
@@ -314,12 +314,6 @@ struct cp_desc {
 	u64		addr;
 };
 
-struct ring_info {
-	struct sk_buff		*skb;
-	dma_addr_t		mapping;
-	u32			len;
-};
-
 struct cp_dma_stats {
 	u64			tx_ok;
 	u64			rx_ok;
@@ -353,23 +347,23 @@ struct cp_private {
 	struct net_device_stats net_stats;
 	struct cp_extra_stats	cp_stats;
 
-	unsigned		rx_tail ____cacheline_aligned;
+	unsigned		rx_head ____cacheline_aligned;
+	unsigned		rx_tail;
 	struct cp_desc		*rx_ring;
-	struct ring_info	rx_skb[CP_RX_RING_SIZE];
-	unsigned		rx_buf_sz;
+	struct sk_buff		*rx_skb[CP_RX_RING_SIZE];
 
 	unsigned		tx_head ____cacheline_aligned;
 	unsigned		tx_tail;
-
 	struct cp_desc		*tx_ring;
-	struct ring_info	tx_skb[CP_TX_RING_SIZE];
-	dma_addr_t		ring_dma;
+	struct sk_buff		*tx_skb[CP_TX_RING_SIZE];
+
+	unsigned		rx_buf_sz;
+	unsigned		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
 
 #if CP_VLAN_TAG_USED
 	struct vlan_group	*vlgrp;
 #endif
-
-	unsigned int		wol_enabled : 1; /* Is Wake-on-LAN enabled? */
+	dma_addr_t		ring_dma;
 
 	struct mii_if_info	mii_if;
 };
@@ -407,10 +401,8 @@ static int cp_set_eeprom(struct net_device *dev,
 			 struct ethtool_eeprom *eeprom, u8 *data);
 
 static struct pci_device_id cp_pci_tbl[] = {
-	{ PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
-	{ PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
 	{ },
 };
 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
@@ -542,7 +534,7 @@ rx_status_loop:
 		struct cp_desc *desc;
 		unsigned buflen;
 
-		skb = cp->rx_skb[rx_tail].skb;
+		skb = cp->rx_skb[rx_tail];
 		BUG_ON(!skb);
 
 		desc = &cp->rx_ring[rx_tail];
@@ -551,7 +543,7 @@ rx_status_loop:
 			break;
 
 		len = (status & 0x1fff) - 4;
-		mapping = cp->rx_skb[rx_tail].mapping;
+		mapping = le64_to_cpu(desc->addr);
 
 		if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
 			/* we don't support incoming fragmented frames.
@@ -572,7 +564,7 @@ rx_status_loop:
 
 		if (netif_msg_rx_status(cp))
 			printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d\n",
-			       cp->dev->name, rx_tail, status, len);
+			       dev->name, rx_tail, status, len);
 
 		buflen = cp->rx_buf_sz + RX_OFFSET;
 		new_skb = dev_alloc_skb (buflen);
@@ -582,7 +574,7 @@ rx_status_loop:
 		}
 
 		skb_reserve(new_skb, RX_OFFSET);
-		new_skb->dev = cp->dev;
+		new_skb->dev = dev;
 
 		pci_unmap_single(cp->pdev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
@@ -595,11 +587,9 @@ rx_status_loop:
 
 		skb_put(skb, len);
 
-		mapping =
-		cp->rx_skb[rx_tail].mapping =
-			pci_map_single(cp->pdev, new_skb->data,
-				       buflen, PCI_DMA_FROMDEVICE);
-		cp->rx_skb[rx_tail].skb = new_skb;
+		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
+					 PCI_DMA_FROMDEVICE);
+		cp->rx_skb[rx_tail] = new_skb;
 
 		cp_rx_skb(cp, skb, desc);
 		rx++;
@@ -717,19 +707,21 @@ static void cp_tx (struct cp_private *cp)
 	unsigned tx_tail = cp->tx_tail;
 
 	while (tx_tail != tx_head) {
+		struct cp_desc *txd = cp->tx_ring + tx_tail;
 		struct sk_buff *skb;
 		u32 status;
 
 		rmb();
-		status = le32_to_cpu(cp->tx_ring[tx_tail].opts1);
+		status = le32_to_cpu(txd->opts1);
 		if (status & DescOwn)
 			break;
 
-		skb = cp->tx_skb[tx_tail].skb;
+		skb = cp->tx_skb[tx_tail];
 		BUG_ON(!skb);
 
-		pci_unmap_single(cp->pdev, cp->tx_skb[tx_tail].mapping,
-				 cp->tx_skb[tx_tail].len, PCI_DMA_TODEVICE);
+		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
+				 le32_to_cpu(txd->opts1) & 0xffff,
+				 PCI_DMA_TODEVICE);
 
 		if (status & LastFrag) {
 			if (status & (TxError | TxFIFOUnder)) {
@@ -756,7 +748,7 @@ static void cp_tx (struct cp_private *cp)
 			dev_kfree_skb_irq(skb);
 		}
 
-		cp->tx_skb[tx_tail].skb = NULL;
+		cp->tx_skb[tx_tail] = NULL;
 
 		tx_tail = NEXT_TX(tx_tail);
 	}
@@ -826,9 +818,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		txd->opts1 = cpu_to_le32(flags);
 		wmb();
 
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].mapping = mapping;
-		cp->tx_skb[entry].len = len;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 	} else {
 		struct cp_desc *txd;
@@ -844,9 +834,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		first_len = skb_headlen(skb);
 		first_mapping = pci_map_single(cp->pdev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].mapping = first_mapping;
-		cp->tx_skb[entry].len = first_len;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -887,9 +875,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		txd->opts1 = cpu_to_le32(ctrl);
 		wmb();
 
-		cp->tx_skb[entry].skb = skb;
-		cp->tx_skb[entry].mapping = mapping;
-		cp->tx_skb[entry].len = len;
+		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
 	}
 
@@ -942,8 +928,6 @@ static void __cp_set_rx_mode (struct net_device *dev)
 	/* Note: do not reorder, GCC is clever about common statements. */
 	if (dev->flags & IFF_PROMISC) {
 		/* Unconditionally log net taps. */
-		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-			dev->name);
 		rx_mode =
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;
@@ -1091,6 +1075,7 @@ static int cp_refill_rx (struct cp_private *cp)
 
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		struct sk_buff *skb;
+		dma_addr_t mapping;
 
 		skb = dev_alloc_skb(cp->rx_buf_sz + RX_OFFSET);
 		if (!skb)
@@ -1099,12 +1084,12 @@ static int cp_refill_rx (struct cp_private *cp)
 		skb->dev = cp->dev;
 		skb_reserve(skb, RX_OFFSET);
 
-		cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
-			skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		cp->rx_skb[i].skb = skb;
+		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
+					 PCI_DMA_FROMDEVICE);
+		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;
-		cp->rx_ring[i].addr = cpu_to_le64(cp->rx_skb[i].mapping);
+		cp->rx_ring[i].addr = cpu_to_le64(mapping);
 		if (i == (CP_RX_RING_SIZE - 1))
 			cp->rx_ring[i].opts1 =
 				cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
@@ -1152,23 +1137,27 @@ static int cp_alloc_rings (struct cp_private *cp)
 
 static void cp_clean_rings (struct cp_private *cp)
 {
+	struct cp_desc *desc;
 	unsigned i;
 
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
-		if (cp->rx_skb[i].skb) {
-			pci_unmap_single(cp->pdev, cp->rx_skb[i].mapping,
+		if (cp->rx_skb[i]) {
+			desc = cp->rx_ring + i;
+			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(cp->rx_skb[i].skb);
+			dev_kfree_skb(cp->rx_skb[i]);
 		}
 	}
 
 	for (i = 0; i < CP_TX_RING_SIZE; i++) {
-		if (cp->tx_skb[i].skb) {
-			struct sk_buff *skb = cp->tx_skb[i].skb;
+		if (cp->tx_skb[i]) {
+			struct sk_buff *skb = cp->tx_skb[i];
 
-			pci_unmap_single(cp->pdev, cp->tx_skb[i].mapping,
-					 cp->tx_skb[i].len, PCI_DMA_TODEVICE);
-			if (le32_to_cpu(cp->tx_ring[i].opts1) & LastFrag)
+			desc = cp->tx_ring + i;
+			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+					 le32_to_cpu(desc->opts1) & 0xffff,
+					 PCI_DMA_TODEVICE);
+			if (le32_to_cpu(desc->opts1) & LastFrag)
 				dev_kfree_skb(skb);
 			cp->net_stats.tx_dropped++;
 		}
@@ -1177,8 +1166,8 @@ static void cp_clean_rings (struct cp_private *cp)
 	memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
 	memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
 
-	memset(&cp->rx_skb, 0, sizeof(struct ring_info) * CP_RX_RING_SIZE);
-	memset(&cp->tx_skb, 0, sizeof(struct ring_info) * CP_TX_RING_SIZE);
+	memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
+	memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
 }
 
 static void cp_free_rings (struct cp_private *cp)
@@ -2010,7 +1999,6 @@ static void cp_remove_one (struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct cp_private *cp = netdev_priv(dev);
 
-	BUG_ON(!dev);
 	unregister_netdev(dev);
 	iounmap(cp->regs);
 	if (cp->wol_enabled)
@@ -2025,14 +2013,12 @@ static void cp_remove_one (struct pci_dev *pdev)
 #ifdef CONFIG_PM
 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
 {
-	struct net_device *dev;
-	struct cp_private *cp;
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct cp_private *cp = netdev_priv(dev);
 	unsigned long flags;
 
-	dev = pci_get_drvdata (pdev);
-	cp  = netdev_priv(dev);
-
-	if (!dev || !netif_running (dev)) return 0;
+	if (!netif_running(dev))
+		return 0;
 
 	netif_device_detach (dev);
 	netif_stop_queue (dev);
@@ -2098,7 +2084,7 @@ static int __init cp_init (void)
 #ifdef MODULE
 	printk("%s", version);
 #endif
-	return pci_module_init (&cp_driver);
+	return pci_register_driver(&cp_driver);
 }
 
 static void __exit cp_exit (void)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index e4f4eaff7679..10301d3daa7d 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -90,7 +90,7 @@
 */
 
 #define DRV_NAME	"8139too"
-#define DRV_VERSION	"0.9.27"
+#define DRV_VERSION	"0.9.28"
 
 
 #include <linux/module.h>
@@ -2512,9 +2512,6 @@ static void __set_rx_mode (struct net_device *dev)
 
 	/* Note: do not reorder, GCC is clever about common statements. */
 	if (dev->flags & IFF_PROMISC) {
-		/* Unconditionally log net taps. */
-		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-			dev->name);
 		rx_mode =
 		    AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 		    AcceptAllPhys;
@@ -2629,7 +2626,7 @@ static int __init rtl8139_init_module (void)
 	printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
 #endif
 
-	return pci_module_init (&rtl8139_pci_driver);
+	return pci_register_driver(&rtl8139_pci_driver);
 }
 
 
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 7e2ca9571467..257d3bce3993 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -899,7 +899,7 @@ memory_squeeze:
 }
 
 
-static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
+static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
 {
 	struct i596_cmd *ptr;
 
@@ -932,7 +932,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
 	lp->scb.cmd = I596_NULL;
 }
 
-static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr)
+static void i596_reset(struct net_device *dev, struct i596_private *lp,
+		       int ioaddr)
 {
 	unsigned long flags;
 
@@ -1578,7 +1579,7 @@ static int debug = -1;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "i82596 debug mask");
 
-int init_module(void)
+int __init init_module(void)
 {
 	if (debug >= 0)
 		i596_debug = debug;
@@ -1588,7 +1589,7 @@ int init_module(void)
 	return 0;
 }
 
-void cleanup_module(void)
+void __exit cleanup_module(void)
 {
 	unregister_netdev(dev_82596);
 #ifdef __mc68000__
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index d2935ae39814..3eb7048684a6 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -299,7 +299,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 *	Slow phase with lock held.
 	 */
 
-	disable_irq_nosync(dev->irq);
+	disable_irq_nosync_lockdep(dev->irq);
 
 	spin_lock(&ei_local->page_lock);
 
@@ -338,7 +338,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 		spin_unlock(&ei_local->page_lock);
-		enable_irq(dev->irq);
+		enable_irq_lockdep(dev->irq);
 		ei_local->stat.tx_errors++;
 		return 1;
 	}
@@ -379,7 +379,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 
 	spin_unlock(&ei_local->page_lock);
-	enable_irq(dev->irq);
+	enable_irq_lockdep(dev->irq);
 
 	dev_kfree_skb (skb);
 	ei_local->stat.tx_bytes += send_length;
@@ -505,9 +505,9 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void ei_poll(struct net_device *dev)
 {
-	disable_irq(dev->irq);
+	disable_irq_lockdep(dev->irq);
 	ei_interrupt(dev->irq, dev, NULL);
-	enable_irq(dev->irq);
+	enable_irq_lockdep(dev->irq);
 }
 #endif
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 39189903e355..de4f9e1f2ca5 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1411,6 +1411,22 @@ config FORCEDETH
 	  <file:Documentation/networking/net-modules.txt>.  The module will be
 	  called forcedeth.
 
+config FORCEDETH_NAPI
+	bool "Use Rx and Tx Polling (NAPI) (EXPERIMENTAL)"
+	depends on FORCEDETH && EXPERIMENTAL
+	help
+	  NAPI is a new driver API designed to reduce CPU and interrupt load
+	  when the driver is receiving lots of packets from the card. It is
+	  still somewhat experimental and thus not yet enabled by default.
+
+	  If your estimated Rx load is 10kpps or more, or if the card will be
+	  deployed on potentially unfriendly networks (e.g. in a firewall),
+	  then say Y here.
+
+	  See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+	  information.
+
+	  If in doubt, say N.
 
 config CS89x0
 	tristate "CS89x0 support"
@@ -1724,6 +1740,20 @@ config VIA_RHINE_MMIO
 
 	  If unsure, say Y.
 
+config VIA_RHINE_NAPI
+	bool "Use Rx Polling (NAPI)"
+	depends on VIA_RHINE
+	help
+	  NAPI is a new driver API designed to reduce CPU and interrupt load
+	  when the driver is receiving lots of packets from the card.
+
+	  If your estimated Rx load is 10kpps or more, or if the card will be
+	  deployed on potentially unfriendly networks (e.g. in a firewall),
+	  then say Y here.
+
+	  See <file:Documentation/networking/NAPI_HOWTO.txt> for more
+	  information.
+
 config LAN_SAA9730
 	bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
 	depends on NET_PCI && EXPERIMENTAL && MIPS
@@ -2219,6 +2249,33 @@ config GFAR_NAPI
 	bool "NAPI Support"
 	depends on GIANFAR
 
+config UCC_GETH
+	tristate "Freescale QE UCC GETH"
+	depends on QUICC_ENGINE && UCC_FAST
+	help
+	  This driver supports the Gigabit Ethernet mode of QE UCC.
+	  QE can be found on MPC836x CPUs.
+
+config UGETH_NAPI
+	bool "NAPI Support"
+	depends on UCC_GETH
+
+config UGETH_MAGIC_PACKET
+	bool "Magic Packet detection support"
+	depends on UCC_GETH
+
+config UGETH_FILTERING
+	bool "Mac address filtering support"
+	depends on UCC_GETH
+
+config UGETH_TX_ON_DEMOND
+	bool "Transmit on Demond support"
+	depends on UCC_GETH
+
+config UGETH_HAS_GIGA
+	bool
+	depends on UCC_GETH && MPC836x
+
 config MV643XX_ETH
 	tristate "MV-643XX Ethernet support"
 	depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM
@@ -2249,6 +2306,15 @@ config MV643XX_ETH_2
 	  This enables support for Port 2 of the Marvell MV643XX Gigabit
 	  Ethernet.
 
+config QLA3XXX
+	tristate "QLogic QLA3XXX Network Driver Support"
+	depends on PCI
+	help
+	  This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called qla3xxx.
+
 endmenu
 
 #
@@ -2352,7 +2418,7 @@ config MYRI10GE
 	  you will need a newer firmware image.
 	  You may get this image or more information, at:
 
-	  <http://www.myri.com/Myri-10G/>
+	  <http://www.myri.com/scs/download-Myri10GE.html>
 
 	  To compile this driver as a module, choose M here and read
 	  <file:Documentation/networking/net-modules.txt>.  The module
@@ -2509,6 +2575,7 @@ config PLIP
 
 config PPP
 	tristate "PPP (point-to-point protocol) support"
+	select SLHC
 	---help---
 	  PPP (Point to Point Protocol) is a newer and better SLIP.  It serves
 	  the same purpose: sending Internet traffic over telephone (and other
@@ -2689,6 +2756,7 @@ config SLIP
 config SLIP_COMPRESSED
 	bool "CSLIP compressed headers"
 	depends on SLIP
+	select SLHC
 	---help---
 	  This protocol is faster than SLIP because it uses compression on the
 	  TCP/IP headers (not on the data itself), but it has to be supported
@@ -2701,6 +2769,12 @@ config SLIP_COMPRESSED
 	  <http://www.tldp.org/docs.html#howto>, explains how to configure
 	  CSLIP. This won't enlarge your kernel.
 
+config SLHC
+	tristate
+	help
+	  This option enables Van Jacobsen serial line header compression
+	  routines.
+
 config SLIP_SMART
 	bool "Keepalive and linefill"
 	depends on SLIP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c91e95126f78..6ff17649c0fc 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -2,10 +2,6 @@
 # Makefile for the Linux network (ethercard) device drivers.
 #
 
-ifeq ($(CONFIG_ISDN_PPP),y)
-  obj-$(CONFIG_ISDN) += slhc.o
-endif
-
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
@@ -18,6 +14,9 @@ gianfar_driver-objs := gianfar.o \
 			      gianfar_mii.o \
 			      gianfar_sysfs.o
 
+obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
+ucc_geth_driver-objs := ucc_geth.o ucc_geth_phy.o
+
 #
 # link order important here
 #
@@ -110,8 +109,9 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
-obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
+obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
 obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
 obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
@@ -120,9 +120,7 @@ obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 
 obj-$(CONFIG_SLIP) += slip.o
-ifeq ($(CONFIG_SLIP_COMPRESSED),y)
-  obj-$(CONFIG_SLIP) += slhc.o
-endif
+obj-$(CONFIG_SLHC) += slhc.o
 
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_IFB) += ifb.o
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 7952dc6d77e3..0fbbcb75af69 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -370,8 +370,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)");
 MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
 MODULE_LICENSE("GPL");
 
-int
-init_module(void)
+int __init init_module(void)
 {
 	struct net_device *dev;
 	int this_dev, found = 0;
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 1c01e9b3d07c..c0f3574b470b 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -725,7 +725,7 @@ static struct pci_driver acenic_pci_driver = {
 
 static int __init acenic_init(void)
 {
-	return pci_module_init(&acenic_pci_driver);
+	return pci_register_driver(&acenic_pci_driver);
 }
 
 static void __exit acenic_exit(void)
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index ed322a76980d..f83df129d7b9 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -101,9 +101,9 @@ Revision History:
 
 #include "amd8111e.h"
 #define MODULE_NAME	"amd8111e"
-#define MODULE_VERS	"3.0.5"
+#define MODULE_VERS	"3.0.6"
 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
-MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.3");
+MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version 3.0.6");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
 module_param_array(speed_duplex, int, NULL, 0);
@@ -1527,7 +1527,6 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
 	u32 mc_filter[2] ;
 	int i,bit_num;
 	if(dev->flags & IFF_PROMISC){
-		printk(KERN_INFO "%s: Setting promiscuous mode.\n",dev->name);
 		writel( VAL2 | PROM, lp->mmio + CMD2);
 		return;
 	}
@@ -2158,7 +2157,7 @@ static struct pci_driver amd8111e_driver = {
 
 static int __init amd8111e_init(void)
 {
-	return pci_module_init(&amd8111e_driver);
+	return pci_register_driver(&amd8111e_driver);
 }
 
 static void __exit amd8111e_cleanup(void)
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index b14e89004c3a..0a0e0cd81a23 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -29,7 +29,7 @@ config ATALK
 	  even politically correct people are allowed to say Y here.
 
 config DEV_APPLETALK
-	bool "Appletalk interfaces support"
+	tristate "Appletalk interfaces support"
 	depends on ATALK
 	help
 	  AppleTalk is the protocol that Apple computers can use to communicate
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1d01ac0000e4..ae7f828344d9 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -1030,7 +1030,7 @@ module_param(io, int, 0);
 module_param(irq, int, 0);
 module_param(board_type, int, 0);
 
-int init_module(void)
+int __init init_module(void)
 {
 	if (io == 0)
 		printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 979a33df0a8c..fc256c197cd6 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -177,7 +177,7 @@ static struct pci_driver com20020pci_driver = {
 static int __init com20020pci_init(void)
 {
 	BUGLVL(D_NORMAL) printk(VERSION);
-	return pci_module_init(&com20020pci_driver);
+	return pci_register_driver(&com20020pci_driver);
 }
 
 static void __exit com20020pci_cleanup(void)
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index cc721addd576..3aef3c10d56f 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -825,8 +825,6 @@ static void set_multicast_list(struct net_device *dev)
     ariadne_init_ring(dev);
 
     if (dev->flags & IFF_PROMISC) {
-	/* Log any net taps. */
-	printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
 	lance->RAP = CSR15;		/* Mode Register */
 	lance->RDP = PROM;		/* Set promiscuous mode */
     } else {
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 5d7929c79bce..1a85451dcb41 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -58,7 +58,7 @@
 #include <asm/dma.h>
 
 static char version[] __initdata =
-	"at1700.c:v1.15 4/7/98  Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
+	"at1700.c:v1.16 9/11/06  Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
 
 #define DRV_NAME "at1700"
 
@@ -851,8 +851,6 @@ set_rx_mode(struct net_device *dev)
 	int i;
 
 	if (dev->flags & IFF_PROMISC) {
-		/* Unconditionally log net taps. */
-		printk("%s: Promiscuous mode enabled.\n", dev->name);
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		outb(3, ioaddr + RX_MODE);	/* Enable promiscuous mode */
 	} else if (dev->mc_count > MC_FILTERBREAK
@@ -901,7 +899,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
 MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
 MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
 
-int init_module(void)
+int __init init_module(void)
 {
 	if (io == 0)
 		printk("at1700: You should not use auto-probing with insmod!\n");
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 91783a8008be..465efe7a6c56 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1121,7 +1121,7 @@ static void set_multicast_list( struct net_device *dev )
 
 	if (dev->flags & IFF_PROMISC) {
 		/* Log any net taps. */
-		DPRINTK( 1, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+		DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name ));
 		REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
 	} else {
 		short multicast_table[4];
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 55f6e3f65b53..85be0e6aa1f3 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -72,7 +72,7 @@ static int au1000_debug = 3;
 #endif
 
 #define DRV_NAME	"au1000_eth"
-#define DRV_VERSION	"1.5"
+#define DRV_VERSION	"1.6"
 #define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
 #define DRV_DESC	"Au1xxx on-chip Ethernet driver"
 
@@ -1292,7 +1292,6 @@ static void set_rx_mode(struct net_device *dev)
 
 	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
 		aup->mac->control |= MAC_PROMISCUOUS;
-		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
 	} else if ((dev->flags & IFF_ALLMULTI) ||
 			   dev->mc_count > MULTICAST_FILTER_LIMIT) {
 		aup->mac->control |= MAC_PASS_ALL_MULTI;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index bea0fc0ede2f..17eb2912971d 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -2354,7 +2354,7 @@ static int __init b44_init(void)
 	dma_desc_align_mask = ~(dma_desc_align_size - 1);
 	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
 
-	return pci_module_init(&b44_driver);
+	return pci_register_driver(&b44_driver);
 }
 
 static void __exit b44_cleanup(void)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index db73de0d2511..654b903985cd 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
 
 #define DRV_MODULE_NAME		"bnx2"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"1.4.43"
-#define DRV_MODULE_RELDATE	"June 28, 2006"
+#define DRV_MODULE_VERSION	"1.4.44"
+#define DRV_MODULE_RELDATE	"August 10, 2006"
 
 #define RUN_AT(x) (jiffies + (x))
 
@@ -209,8 +209,10 @@ MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
 {
-	u32 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
+	u32 diff;
 
+	smp_mb();
+	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
 	if (diff > MAX_TX_DESC_CNT)
 		diff = (diff & MAX_TX_DESC_CNT) - 1;
 	return (bp->tx_ring_size - diff);
@@ -1569,7 +1571,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
 	unsigned long align;
 
-	skb = dev_alloc_skb(bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
 	if (skb == NULL) {
 		return -ENOMEM;
 	}
@@ -1578,7 +1580,6 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
 		skb_reserve(skb, 8 - align);
 	}
 
-	skb->dev = bp->dev;
 	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
 		PCI_DMA_FROMDEVICE);
 
@@ -1686,15 +1687,20 @@ bnx2_tx_int(struct bnx2 *bp)
 	}
 
 	bp->tx_cons = sw_cons;
+	/* Need to make the tx_cons update visible to bnx2_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that bnx2_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
 
-	if (unlikely(netif_queue_stopped(bp->dev))) {
-		spin_lock(&bp->tx_lock);
+	if (unlikely(netif_queue_stopped(bp->dev)) &&
+	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
+		netif_tx_lock(bp->dev);
 		if ((netif_queue_stopped(bp->dev)) &&
-		    (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)) {
-
+		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
 			netif_wake_queue(bp->dev);
-		}
-		spin_unlock(&bp->tx_lock);
+		netif_tx_unlock(bp->dev);
 	}
 }
 
@@ -1786,7 +1792,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
 			struct sk_buff *new_skb;
 
-			new_skb = dev_alloc_skb(len + 2);
+			new_skb = netdev_alloc_skb(bp->dev, len + 2);
 			if (new_skb == NULL)
 				goto reuse_rx;
 
@@ -1797,7 +1803,6 @@ bnx2_rx_int(struct bnx2 *bp, int budget)
 
 			skb_reserve(new_skb, 2);
 			skb_put(new_skb, len);
-			new_skb->dev = bp->dev;
 
 			bnx2_reuse_rx_skb(bp, skb,
 				sw_ring_cons, sw_ring_prod);
@@ -3503,6 +3508,8 @@ bnx2_init_tx_ring(struct bnx2 *bp)
 	struct tx_bd *txbd;
 	u32 val;
 
+	bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
 	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
 
 	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
@@ -3952,7 +3959,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 		return -EINVAL;
 
 	pkt_size = 1514;
-	skb = dev_alloc_skb(pkt_size);
+	skb = netdev_alloc_skb(bp->dev, pkt_size);
 	if (!skb)
 		return -ENOMEM;
 	packet = skb_put(skb, pkt_size);
@@ -4390,10 +4397,8 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 #endif
 
 /* Called with netif_tx_lock.
- * hard_start_xmit is pseudo-lockless - a lock is only required when
- * the tx queue is full. This way, we get the benefit of lockless
- * operations most of the time without the complexities to handle
- * netif_stop_queue/wake_queue race conditions.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
  */
 static int
 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -4512,12 +4517,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
-		spin_lock(&bp->tx_lock);
 		netif_stop_queue(dev);
-
-		if (bnx2_tx_avail(bp) > MAX_SKB_FRAGS)
+		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
 			netif_wake_queue(dev);
-		spin_unlock(&bp->tx_lock);
 	}
 
 	return NETDEV_TX_OK;
@@ -5628,7 +5630,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->pdev = pdev;
 
 	spin_lock_init(&bp->phy_lock);
-	spin_lock_init(&bp->tx_lock);
 	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
@@ -5751,7 +5752,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	bp->mac_addr[5] = (u8) reg;
 
 	bp->tx_ring_size = MAX_TX_DESC_CNT;
-	bnx2_set_rx_ring_size(bp, 100);
+	bnx2_set_rx_ring_size(bp, 255);
 
 	bp->rx_csum = 1;
 
@@ -6015,7 +6016,7 @@ static struct pci_driver bnx2_pci_driver = {
 
 static int __init bnx2_init(void)
 {
-	return pci_module_init(&bnx2_pci_driver);
+	return pci_register_driver(&bnx2_pci_driver);
 }
 
 static void __exit bnx2_cleanup(void)
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 658c5ee95c73..fe804763c607 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -3890,10 +3890,6 @@ struct bnx2 {
 	u32			tx_prod_bseq __attribute__((aligned(L1_CACHE_BYTES)));
 	u16			tx_prod;
 
-	struct tx_bd		*tx_desc_ring;
-	struct sw_bd		*tx_buf_ring;
-	int			tx_ring_size;
-
 	u16			tx_cons __attribute__((aligned(L1_CACHE_BYTES)));
 	u16			hw_tx_cons;
 
@@ -3916,9 +3912,11 @@ struct bnx2 {
 	struct sw_bd		*rx_buf_ring;
 	struct rx_bd		*rx_desc_ring[MAX_RX_RINGS];
 
-	/* Only used to synchronize netif_stop_queue/wake_queue when tx */
-	/* ring is full */
-	spinlock_t		tx_lock;
+	/* TX constants */
+	struct tx_bd		*tx_desc_ring;
+	struct sw_bd		*tx_buf_ring;
+	int			tx_ring_size;
+	u32			tx_wake_thresh;
 
 	/* End of fields used in the performance code paths. */
 
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index a31544ccb3c4..26040abfef62 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -5245,7 +5245,7 @@ static int __init cas_init(void)
 	else
 		link_transition_timeout = 0;
 
-	return pci_module_init(&cas_driver);
+	return pci_register_driver(&cas_driver);
 }
 
 static void __exit cas_cleanup(void)
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index e67872433e92..b6de184e4699 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -1243,7 +1243,7 @@ static struct pci_driver driver = {
 
 static int __init t1_init_module(void)
 {
-	return pci_module_init(&driver);
+	return pci_register_driver(&driver);
 }
 
 static void __exit t1_cleanup_module(void)
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 47eecce35fa4..2dcca79b1f6a 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1905,8 +1905,7 @@ MODULE_LICENSE("GPL");
 
 */
 
-int
-init_module(void)
+int __init init_module(void)
 {
 	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
 	struct net_local *lp;
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 91cc8cbdd440..7d06dedbfb26 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -3444,7 +3444,7 @@ static int __init dfx_init(void)
 {
 	int rc_pci, rc_eisa;
 
-	rc_pci = pci_module_init(&dfx_driver);
+	rc_pci = pci_register_driver(&dfx_driver);
 	if (rc_pci >= 0) dfx_have_pci = 1;
 
 	rc_eisa = dfx_eisa_init();
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 402961e68c89..a572c2970564 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1815,7 +1815,7 @@ static struct pci_driver rio_driver = {
 static int __init
 rio_init (void)
 {
-	return pci_module_init (&rio_driver);
+	return pci_register_driver(&rio_driver);
 }
 
 static void __exit
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 1b758b707134..a860ebbbf815 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -339,6 +339,17 @@ static void dm9000_timeout(struct net_device *dev)
 	spin_unlock_irqrestore(&db->lock,flags);
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ *Used by netconsole
+ */
+static void dm9000_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	dm9000_interrupt(dev->irq,dev,NULL);
+	enable_irq(dev->irq);
+}
+#endif
 
 /* dm9000_release_board
  *
@@ -366,8 +377,8 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
 		kfree(db->data_req);
 	}
 
-	if (db->addr_res != NULL) {
-		release_resource(db->addr_res);
+	if (db->addr_req != NULL) {
+		release_resource(db->addr_req);
 		kfree(db->addr_req);
 	}
 }
@@ -538,6 +549,9 @@ dm9000_probe(struct platform_device *pdev)
 	ndev->stop		 = &dm9000_stop;
 	ndev->get_stats		 = &dm9000_get_stats;
 	ndev->set_multicast_list = &dm9000_hash_table;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	ndev->poll_controller	 = &dm9000_poll_controller;
+#endif
 
 #ifdef DM9000_PROGRAM_EEPROM
 	program_eeprom(db);
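
The dm9000 hunks above wire up netpoll (used by netconsole) by giving the device a poll_controller hook. The generic shape for a driver with an old-style three-argument interrupt handler looks like this sketch (names are placeholders):

	#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Mask our IRQ, run the normal interrupt handler as if the
	 * hardware had fired, then unmask. */
	static void example_poll_controller(struct net_device *dev)
	{
		disable_irq(dev->irq);
		example_interrupt(dev->irq, dev, NULL);
		enable_irq(dev->irq);
	}
	#endif

and the probe path points the hook at it:

	#ifdef CONFIG_NET_POLL_CONTROLLER
		ndev->poll_controller = &example_poll_controller;
	#endif
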
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 91ef5f2fd768..47d970896a5c 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
 
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
@@ -158,10 +158,10 @@
 
 
 #define DRV_NAME		"e100"
 #define DRV_EXT			"-NAPI"
-#define DRV_VERSION		"3.5.10-k2"DRV_EXT
+#define DRV_VERSION		"3.5.16-k2"DRV_EXT
 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
-#define DRV_COPYRIGHT		"Copyright(c) 1999-2005 Intel Corporation"
+#define DRV_COPYRIGHT		"Copyright(c) 1999-2006 Intel Corporation"
 #define PFX			DRV_NAME ": "
 
 #define E100_WATCHDOG_PERIOD	(2 * HZ)
@@ -173,8 +173,11 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 static int debug = 3;
+static int eeprom_bad_csum_allow = 0;
 module_param(debug, int, 0);
+module_param(eeprom_bad_csum_allow, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
 #define DPRINTK(nlevel, klevel, fmt, args...) \
 	(void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
 	printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
@@ -756,7 +759,8 @@ static int e100_eeprom_load(struct nic *nic)
 	checksum = le16_to_cpu(0xBABA - checksum);
 	if(checksum != nic->eeprom[nic->eeprom_wc - 1]) {
 		DPRINTK(PROBE, ERR, "EEPROM corrupted\n");
-		return -EAGAIN;
+		if (!eeprom_bad_csum_allow)
+			return -EAGAIN;
 	}
 
 	return 0;
@@ -1391,15 +1395,11 @@ static int e100_phy_init(struct nic *nic)
 	}
 
 	if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
-	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
-		/* enable/disable MDI/MDI-X auto-switching.
-		   MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
-		if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
-			(nic->mac == mac_82551_10) || (nic->mii.force_media) ||
-			!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
-			mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
-		else
-			mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
+	   (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
+	   !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+		/* enable/disable MDI/MDI-X auto-switching. */
+		mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
+			nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
 	}
 
 	return 0;
@@ -1763,11 +1763,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
 #define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
 static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
 {
-	if(!(rx->skb = dev_alloc_skb(RFD_BUF_LEN + NET_IP_ALIGN)))
+	if(!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN)))
 		return -ENOMEM;
 
 	/* Align, init, and map the RFD. */
-	rx->skb->dev = nic->netdev;
 	skb_reserve(rx->skb, NET_IP_ALIGN);
 	memcpy(rx->skb->data, &nic->blank_rfd, sizeof(struct rfd));
 	rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
@@ -2143,7 +2142,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
 
 	e100_start_receiver(nic, NULL);
 
-	if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
+	if(!(skb = netdev_alloc_skb(nic->netdev, ETH_DATA_LEN))) {
 		err = -ENOMEM;
 		goto err_loopback_none;
 	}
@@ -2795,6 +2794,7 @@ static pci_ers_result_t e100_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 	/* Detach; put netif into state similar to hotplug unplug. */
 	netif_poll_enable(netdev);
 	netif_device_detach(netdev);
+	pci_disable_device(pdev);
 
 	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -2873,7 +2873,7 @@ static int __init e100_init_module(void)
 		printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
 		printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT);
 	}
-	return pci_module_init(&e100_driver);
+	return pci_register_driver(&e100_driver);
 }
 
 static void __exit e100_cleanup_module(void)
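
Two e100 hunks above replace dev_alloc_skb() with netdev_alloc_skb(). The newer helper takes the net_device, associates the skb with it at allocation time, and so lets the caller drop the explicit skb->dev assignment. A minimal sketch of the receive-buffer pattern (placeholder names):

	static int example_rx_alloc(struct net_device *netdev,
				    struct sk_buff **out, unsigned int len)
	{
		struct sk_buff *skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);

		if (!skb)
			return -ENOMEM;
		/* reserve headroom so the IP header lands aligned */
		skb_reserve(skb, NET_IP_ALIGN);
		*out = skb;
		return 0;
	}
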
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index d304297c496c..98afa9c2057e 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -242,12 +242,10 @@ struct e1000_adapter {
 	struct timer_list watchdog_timer;
 	struct timer_list phy_info_timer;
 	struct vlan_group *vlgrp;
 	uint16_t mng_vlan_id;
 	uint32_t bd_number;
 	uint32_t rx_buffer_len;
-	uint32_t part_num;
 	uint32_t wol;
-	uint32_t ksp3_port_a;
 	uint32_t smartspeed;
 	uint32_t en_mng_pt;
 	uint16_t link_speed;
@@ -342,7 +340,9 @@ struct e1000_adapter {
 	boolean_t tso_force;
 #endif
 	boolean_t smart_power_down;	/* phy smart power down */
+	boolean_t quad_port_a;
 	unsigned long flags;
+	uint32_t eeprom_wol;
 };
 
 enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 88a82ba88f57..3fccffdb27b5 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -183,6 +183,9 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		return -EINVAL;
 	}
 
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		hw->autoneg = 1;
 		if (hw->media_type == e1000_media_type_fiber)
@@ -199,16 +202,20 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 		             ADVERTISED_TP;
 		ecmd->advertising = hw->autoneg_advertised;
 	} else
-		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
+		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+			clear_bit(__E1000_RESETTING, &adapter->flags);
 			return -EINVAL;
+		}
 
 	/* reset the link */
 
-	if (netif_running(adapter->netdev))
-		e1000_reinit_locked(adapter);
-	else
+	if (netif_running(adapter->netdev)) {
+		e1000_down(adapter);
+		e1000_up(adapter);
+	} else
 		e1000_reset(adapter);
 
+	clear_bit(__E1000_RESETTING, &adapter->flags);
 	return 0;
 }
@@ -238,9 +245,13 @@ e1000_set_pauseparam(struct net_device *netdev,
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	int retval = 0;
 
 	adapter->fc_autoneg = pause->autoneg;
 
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+
 	if (pause->rx_pause && pause->tx_pause)
 		hw->fc = e1000_fc_full;
 	else if (pause->rx_pause && !pause->tx_pause)
@@ -253,15 +264,17 @@ e1000_set_pauseparam(struct net_device *netdev,
 	hw->original_fc = hw->fc;
 
 	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
-		if (netif_running(adapter->netdev))
-			e1000_reinit_locked(adapter);
-		else
+		if (netif_running(adapter->netdev)) {
+			e1000_down(adapter);
+			e1000_up(adapter);
+		} else
 			e1000_reset(adapter);
 	} else
-		return ((hw->media_type == e1000_media_type_fiber) ?
+		retval = ((hw->media_type == e1000_media_type_fiber) ?
 		        e1000_setup_link(hw) : e1000_force_mac_fc(hw));
 
-	return 0;
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+	return retval;
 }
 
 static uint32_t
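
Both ethtool paths above serialize reconfiguration with a flag bit rather than a sleeping lock: spin on test_and_set_bit() until the __E1000_RESETTING bit is free, reconfigure, then clear it. A condensed sketch of the pattern (error handling elided):

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);		/* someone else is resetting; wait */

	if (netif_running(netdev)) {
		e1000_down(adapter);	/* quiesce */
		e1000_up(adapter);	/* re-apply settings */
	} else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

Every early return between the set and the clear must drop the bit first, which is exactly what the added clear_bit() before the -EINVAL return does.
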
@@ -415,12 +428,12 @@ e1000_get_regs(struct net_device *netdev,
 		regs_buff[23] = regs_buff[18]; /* mdix mode */
 		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
 	} else {
 		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
 		regs_buff[13] = (uint32_t)phy_data; /* cable length */
 		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
 		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
 		regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
 		regs_buff[18] = regs_buff[13]; /* cable polarity */
 		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
@@ -696,7 +709,6 @@ e1000_set_ringparam(struct net_device *netdev,
 	}
 
 	clear_bit(__E1000_RESETTING, &adapter->flags);
-
 	return 0;
 err_setup_tx:
 	e1000_free_all_rx_resources(adapter);
@@ -881,16 +893,17 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 
 	*data = 0;
 
+	/* NOTE: we don't test MSI interrupts here, yet */
 	/* Hook up test interrupt handler just for this test */
 	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED,
-	                 netdev->name, netdev)) {
+	                 netdev->name, netdev))
 		shared_int = FALSE;
-	} else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
-	          netdev->name, netdev)){
+	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+	         netdev->name, netdev)) {
 		*data = 1;
 		return -1;
 	}
-	DPRINTK(PROBE,INFO, "testing %s interrupt\n",
+	DPRINTK(HW, INFO, "testing %s interrupt\n",
 	        (shared_int ? "shared" : "unshared"));
 
 	/* Disable all the interrupts */
@@ -1256,11 +1269,10 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
 		/* autoneg off */
 		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
-	} else if (adapter->hw.phy_type == e1000_phy_gg82563) {
+	} else if (adapter->hw.phy_type == e1000_phy_gg82563)
 		e1000_write_phy_reg(&adapter->hw,
 		                    GG82563_PHY_KMRN_MODE_CTRL,
 		                    0x1CC);
-	}
 
 	ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
 
@@ -1288,9 +1300,9 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	}
 
 	if (adapter->hw.media_type == e1000_media_type_copper &&
-	   adapter->hw.phy_type == e1000_phy_m88) {
+	   adapter->hw.phy_type == e1000_phy_m88)
 		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
-	} else {
+	else {
 		/* Set the ILOS bit on the fiber Nic is half
 		 * duplex link is detected. */
 		stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
@@ -1426,11 +1438,10 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
 	case e1000_82546_rev_3:
 	default:
 		hw->autoneg = TRUE;
-		if (hw->phy_type == e1000_phy_gg82563) {
+		if (hw->phy_type == e1000_phy_gg82563)
 			e1000_write_phy_reg(hw,
 			                    GG82563_PHY_KMRN_MODE_CTRL,
 			                    0x180);
-		}
 		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
 		if (phy_reg & MII_CR_LOOPBACK) {
 			phy_reg &= ~MII_CR_LOOPBACK;
@@ -1590,6 +1601,8 @@ e1000_diag_test_count(struct net_device *netdev)
 	return E1000_TEST_LEN;
 }
 
+extern void e1000_power_up_phy(struct e1000_adapter *);
+
 static void
 e1000_diag_test(struct net_device *netdev,
                 struct ethtool_test *eth_test, uint64_t *data)
@@ -1606,6 +1619,8 @@ e1000_diag_test(struct net_device *netdev,
 		uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
 		uint8_t autoneg = adapter->hw.autoneg;
 
+		DPRINTK(HW, INFO, "offline testing starting\n");
+
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
 		if (e1000_link_test(adapter, &data[4]))
@@ -1629,6 +1644,8 @@ e1000_diag_test(struct net_device *netdev,
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000_reset(adapter);
+		/* make sure the phy is powered up */
+		e1000_power_up_phy(adapter);
 		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1642,6 +1659,7 @@ e1000_diag_test(struct net_device *netdev,
 		if (if_running)
 			dev_open(netdev);
 	} else {
+		DPRINTK(HW, INFO, "online testing starting\n");
 		/* Online tests */
 		if (e1000_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1657,14 +1675,12 @@ e1000_diag_test(struct net_device *netdev,
 	msleep_interruptible(4 * 1000);
 }
 
-static void
-e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int e1000_wol_exclusion(struct e1000_adapter *adapter, struct ethtool_wolinfo *wol)
 {
-	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	int retval = 1; /* fail by default */
 
-	switch (adapter->hw.device_id) {
-	case E1000_DEV_ID_82542:
+	switch (hw->device_id) {
 	case E1000_DEV_ID_82543GC_FIBER:
 	case E1000_DEV_ID_82543GC_COPPER:
 	case E1000_DEV_ID_82544EI_FIBER:
@@ -1672,52 +1688,87 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	case E1000_DEV_ID_82545EM_FIBER:
 	case E1000_DEV_ID_82545EM_COPPER:
 	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_PCIE:
+		/* these don't support WoL at all */
 		wol->supported = 0;
-		wol->wolopts = 0;
-		return;
-
-	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
-		/* device id 10B5 port-A supports wol */
-		if (!adapter->ksp3_port_a) {
-			wol->supported = 0;
-			return;
-		}
-		/* KSP3 does not suppport UCAST wake-ups for any interface */
-		wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
-
-		if (adapter->wol & E1000_WUFC_EX)
-			DPRINTK(DRV, ERR, "Interface does not support "
-			        "directed (unicast) frame wake-up packets\n");
-		wol->wolopts = 0;
-		goto do_defaults;
-
+		break;
 	case E1000_DEV_ID_82546EB_FIBER:
 	case E1000_DEV_ID_82546GB_FIBER:
 	case E1000_DEV_ID_82571EB_FIBER:
-		/* Wake events only supported on port A for dual fiber */
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_COPPER:
+		/* Wake events not supported on port B */
 		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
 			wol->supported = 0;
-			wol->wolopts = 0;
-			return;
+			break;
 		}
-		/* Fall Through */
-
+		/* return success for non excluded adapter ports */
+		retval = 0;
+		break;
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* quad port adapters only support WoL on port A */
+		if (!adapter->quad_port_a) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non excluded adapter ports */
+		retval = 0;
+		break;
 	default:
-		wol->supported = WAKE_UCAST | WAKE_MCAST |
-		                 WAKE_BCAST | WAKE_MAGIC;
-		wol->wolopts = 0;
+		/* dual port cards only support WoL on port A from now on
+		 * unless it was enabled in the eeprom for port B
+		 * so exclude FUNC_1 ports from having WoL enabled */
+		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1 &&
+		    !adapter->eeprom_wol) {
+			wol->supported = 0;
+			break;
+		}
 
-do_defaults:
-	if (adapter->wol & E1000_WUFC_EX)
-		wol->wolopts |= WAKE_UCAST;
-	if (adapter->wol & E1000_WUFC_MC)
-		wol->wolopts |= WAKE_MCAST;
-	if (adapter->wol & E1000_WUFC_BC)
-		wol->wolopts |= WAKE_BCAST;
-	if (adapter->wol & E1000_WUFC_MAG)
-		wol->wolopts |= WAKE_MAGIC;
+		retval = 0;
+	}
+
+	return retval;
+}
+
+static void
+e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	wol->supported = WAKE_UCAST | WAKE_MCAST |
+	                 WAKE_BCAST | WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	/* this function will set ->supported = 0 and return 1 if wol is not
+	 * supported by this hardware */
+	if (e1000_wol_exclusion(adapter, wol))
 		return;
+
+	/* apply any specific unsupported masks here */
+	switch (adapter->hw.device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* KSP3 does not suppport UCAST wake-ups */
+		wol->supported &= ~WAKE_UCAST;
+
+		if (adapter->wol & E1000_WUFC_EX)
+			DPRINTK(DRV, ERR, "Interface does not support "
+			        "directed (unicast) frame wake-up packets\n");
+		break;
+	default:
+		break;
 	}
+
+	if (adapter->wol & E1000_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & E1000_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & E1000_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & E1000_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+
+	return;
 }
 
 static int
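
The refactor above replaces two divergent switch statements with one predicate, e1000_wol_exclusion(), that both get_wol and set_wol consult. Reduced to its shape (placeholder names and flags, not the driver's exact logic):

	static int example_wol_excluded(struct example_adapter *adapter,
					struct ethtool_wolinfo *wol)
	{
		if (example_port_cannot_wake(adapter)) {	/* placeholder test */
			wol->supported = 0;
			return 1;	/* excluded: caller reports no WoL */
		}
		return 0;
	}

	static void example_get_wol(struct net_device *netdev,
				    struct ethtool_wolinfo *wol)
	{
		struct example_adapter *adapter = netdev_priv(netdev);

		wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
		wol->wolopts = 0;
		if (example_wol_excluded(adapter, wol))
			return;
		if (adapter->wol & EXAMPLE_WUFC_MAG)	/* placeholder flag */
			wol->wolopts |= WAKE_MAGIC;
	}

Keeping the exclusion list in one place means set_wol can reject requests on excluded ports with the same logic get_wol uses to report them.
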
@@ -1726,51 +1777,35 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	switch (adapter->hw.device_id) {
-	case E1000_DEV_ID_82542:
-	case E1000_DEV_ID_82543GC_FIBER:
-	case E1000_DEV_ID_82543GC_COPPER:
-	case E1000_DEV_ID_82544EI_FIBER:
-	case E1000_DEV_ID_82546EB_QUAD_COPPER:
-	case E1000_DEV_ID_82546GB_QUAD_COPPER:
-	case E1000_DEV_ID_82545EM_FIBER:
-	case E1000_DEV_ID_82545EM_COPPER:
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if (e1000_wol_exclusion(adapter, wol))
 		return wol->wolopts ? -EOPNOTSUPP : 0;
 
+	switch (hw->device_id) {
 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
-		/* device id 10B5 port-A supports wol */
-		if (!adapter->ksp3_port_a)
-			return wol->wolopts ? -EOPNOTSUPP : 0;
-
 		if (wol->wolopts & WAKE_UCAST) {
 			DPRINTK(DRV, ERR, "Interface does not support "
 			        "directed (unicast) frame wake-up packets\n");
 			return -EOPNOTSUPP;
 		}
-
-	case E1000_DEV_ID_82546EB_FIBER:
-	case E1000_DEV_ID_82546GB_FIBER:
-	case E1000_DEV_ID_82571EB_FIBER:
-		/* Wake events only supported on port A for dual fiber */
-		if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
-			return wol->wolopts ? -EOPNOTSUPP : 0;
-		/* Fall Through */
-
+		break;
 	default:
-		if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
-			return -EOPNOTSUPP;
+		break;
+	}
 
-		adapter->wol = 0;
+	/* these settings will always override what we currently have */
+	adapter->wol = 0;
 
-		if (wol->wolopts & WAKE_UCAST)
-			adapter->wol |= E1000_WUFC_EX;
-		if (wol->wolopts & WAKE_MCAST)
-			adapter->wol |= E1000_WUFC_MC;
-		if (wol->wolopts & WAKE_BCAST)
-			adapter->wol |= E1000_WUFC_BC;
-		if (wol->wolopts & WAKE_MAGIC)
-			adapter->wol |= E1000_WUFC_MAG;
-	}
+	if (wol->wolopts & WAKE_UCAST)
+		adapter->wol |= E1000_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		adapter->wol |= E1000_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		adapter->wol |= E1000_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= E1000_WUFC_MAG;
 
 	return 0;
 }
@@ -1895,8 +1930,8 @@ static struct ethtool_ops e1000_ethtool_ops = {
 	.get_regs = e1000_get_regs,
 	.get_wol = e1000_get_wol,
 	.set_wol = e1000_set_wol,
 	.get_msglevel = e1000_get_msglevel,
 	.set_msglevel = e1000_set_msglevel,
 	.nway_reset = e1000_nway_reset,
 	.get_link = ethtool_op_get_link,
 	.get_eeprom_len = e1000_get_eeprom_len,
@@ -1904,17 +1939,17 @@ static struct ethtool_ops e1000_ethtool_ops = {
 	.set_eeprom = e1000_set_eeprom,
 	.get_ringparam = e1000_get_ringparam,
 	.set_ringparam = e1000_set_ringparam,
 	.get_pauseparam = e1000_get_pauseparam,
 	.set_pauseparam = e1000_set_pauseparam,
 	.get_rx_csum = e1000_get_rx_csum,
 	.set_rx_csum = e1000_set_rx_csum,
 	.get_tx_csum = e1000_get_tx_csum,
 	.set_tx_csum = e1000_set_tx_csum,
 	.get_sg = ethtool_op_get_sg,
 	.set_sg = ethtool_op_set_sg,
 #ifdef NETIF_F_TSO
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = e1000_set_tso,
 #endif
 	.self_test_count = e1000_diag_test_count,
 	.self_test = e1000_diag_test,
@@ -1922,7 +1957,7 @@ static struct ethtool_ops e1000_ethtool_ops = {
 	.phys_id = e1000_phys_id,
 	.get_stats_count = e1000_get_stats_count,
 	.get_ethtool_stats = e1000_get_ethtool_stats,
 	.get_perm_addr = ethtool_op_get_perm_addr,
 };
 
 void e1000_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 583518ae49ce..a6f8f4fce701 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -31,6 +31,7 @@
  * Shared functions for accessing and configuring the MAC
  */
 
+
 #include "e1000_hw.h"
 
 static int32_t e1000_set_phy_type(struct e1000_hw *hw);
@@ -105,6 +106,33 @@ static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
                                                uint16_t duplex);
 static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
 
+static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw,
+                                           uint32_t segment);
+static int32_t e1000_get_software_flag(struct e1000_hw *hw);
+static int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
+static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+                                      uint16_t words, uint16_t *data);
+static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
+                                    uint8_t* data);
+static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
+                                    uint16_t *data);
+static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
+                                   uint16_t *data);
+static void e1000_release_software_flag(struct e1000_hw *hw);
+static void e1000_release_software_semaphore(struct e1000_hw *hw);
+static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw,
+                                         uint32_t no_snoop);
+static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw,
+                                            uint32_t index, uint8_t byte);
+static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+                                       uint16_t words, uint16_t *data);
+static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+                                     uint8_t data);
+static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
+                                    uint16_t data);
+
 /* IGP cable length table */
 static const
 uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
@@ -139,10 +167,10 @@ e1000_set_phy_type(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_set_phy_type");
 
-    if(hw->mac_type == e1000_undefined)
+    if (hw->mac_type == e1000_undefined)
         return -E1000_ERR_PHY_TYPE;
 
-    switch(hw->phy_id) {
+    switch (hw->phy_id) {
     case M88E1000_E_PHY_ID:
     case M88E1000_I_PHY_ID:
     case M88E1011_I_PHY_ID:
@@ -150,10 +178,10 @@ e1000_set_phy_type(struct e1000_hw *hw)
         hw->phy_type = e1000_phy_m88;
         break;
     case IGP01E1000_I_PHY_ID:
-        if(hw->mac_type == e1000_82541 ||
+        if (hw->mac_type == e1000_82541 ||
            hw->mac_type == e1000_82541_rev_2 ||
            hw->mac_type == e1000_82547 ||
            hw->mac_type == e1000_82547_rev_2) {
             hw->phy_type = e1000_phy_igp;
             break;
         }
@@ -180,6 +208,7 @@ e1000_set_phy_type(struct e1000_hw *hw)
     return E1000_SUCCESS;
 }
 
+
 /******************************************************************************
  * IGP phy init script - initializes the GbE PHY
  *
@@ -193,7 +222,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_phy_init_script");
 
-    if(hw->phy_init_script) {
+    if (hw->phy_init_script) {
         msec_delay(20);
 
         /* Save off the current value of register 0x2F5B to be restored at
@@ -209,7 +238,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
 
         msec_delay(5);
 
-        switch(hw->mac_type) {
+        switch (hw->mac_type) {
         case e1000_82541:
         case e1000_82547:
             e1000_write_phy_reg(hw, 0x1F95, 0x0001);
@@ -246,22 +275,22 @@ e1000_phy_init_script(struct e1000_hw *hw)
         /* Now enable the transmitter */
         e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
 
-        if(hw->mac_type == e1000_82547) {
+        if (hw->mac_type == e1000_82547) {
             uint16_t fused, fine, coarse;
 
             /* Move to analog registers page */
             e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
 
-            if(!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+            if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
                 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
 
                 fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
                 coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
 
-                if(coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+                if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
                     coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
                     fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
-                } else if(coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+                } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
                     fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
 
                 fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
@@ -360,6 +389,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
     case E1000_DEV_ID_82571EB_COPPER:
     case E1000_DEV_ID_82571EB_FIBER:
     case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82571EB_QUAD_COPPER:
         hw->mac_type = e1000_82571;
         break;
     case E1000_DEV_ID_82572EI_COPPER:
@@ -391,7 +421,7 @@ e1000_set_mac_type(struct e1000_hw *hw)
         return -E1000_ERR_MAC_TYPE;
     }
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_ich8lan:
         hw->swfwhw_semaphore_present = TRUE;
         hw->asf_firmware_present = TRUE;
@@ -429,7 +459,7 @@ e1000_set_media_type(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_set_media_type");
 
-    if(hw->mac_type != e1000_82543) {
+    if (hw->mac_type != e1000_82543) {
         /* tbi_compatibility is only valid on 82543 */
         hw->tbi_compatibility_en = FALSE;
     }
@@ -489,16 +519,16 @@ e1000_reset_hw(struct e1000_hw *hw)
     DEBUGFUNC("e1000_reset_hw");
 
     /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
-    if(hw->mac_type == e1000_82542_rev2_0) {
+    if (hw->mac_type == e1000_82542_rev2_0) {
         DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
         e1000_pci_clear_mwi(hw);
     }
 
-    if(hw->bus_type == e1000_bus_type_pci_express) {
+    if (hw->bus_type == e1000_bus_type_pci_express) {
         /* Prevent the PCI-E bus from sticking if there is no TLP connection
          * on the last TLP read/write transaction when MAC is reset.
          */
-        if(e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+        if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
             DEBUGOUT("PCI-E Master disable polling has failed.\n");
         }
     }
@@ -526,14 +556,14 @@ e1000_reset_hw(struct e1000_hw *hw)
     ctrl = E1000_READ_REG(hw, CTRL);
 
     /* Must reset the PHY before resetting the MAC */
-    if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
         E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
         msec_delay(5);
     }
 
     /* Must acquire the MDIO ownership before MAC reset.
      * Ownership defaults to firmware after a reset. */
-    if(hw->mac_type == e1000_82573) {
+    if (hw->mac_type == e1000_82573) {
         timeout = 10;
 
         extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
@@ -543,14 +573,14 @@ e1000_reset_hw(struct e1000_hw *hw)
             E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
             extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
 
-            if(extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
                 break;
             else
                 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
 
             msec_delay(2);
             timeout--;
-        } while(timeout);
+        } while (timeout);
     }
 
     /* Workaround for ICH8 bit corruption issue in FIFO memory */
@@ -568,7 +598,7 @@ e1000_reset_hw(struct e1000_hw *hw)
      */
     DEBUGOUT("Issuing a global reset to MAC\n");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82544:
     case e1000_82540:
     case e1000_82545:
@@ -607,7 +637,7 @@ e1000_reset_hw(struct e1000_hw *hw)
      * device. Later controllers reload the EEPROM automatically, so just wait
      * for reload to complete.
      */
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -642,7 +672,7 @@ e1000_reset_hw(struct e1000_hw *hw)
     case e1000_ich8lan:
     case e1000_80003es2lan:
         ret_val = e1000_get_auto_rd_done(hw);
-        if(ret_val)
+        if (ret_val)
             /* We don't want to continue accessing MAC registers. */
             return ret_val;
         break;
@@ -653,13 +683,13 @@ e1000_reset_hw(struct e1000_hw *hw)
     }
 
     /* Disable HW ARPs on ASF enabled adapters */
-    if(hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
+    if (hw->mac_type >= e1000_82540 && hw->mac_type <= e1000_82547_rev_2) {
         manc = E1000_READ_REG(hw, MANC);
         manc &= ~(E1000_MANC_ARP_EN);
         E1000_WRITE_REG(hw, MANC, manc);
     }
 
-    if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
         e1000_phy_init_script(hw);
 
         /* Configure activity LED after PHY reset */
@@ -677,8 +707,8 @@ e1000_reset_hw(struct e1000_hw *hw)
     icr = E1000_READ_REG(hw, ICR);
 
     /* If MWI was previously enabled, reenable it. */
-    if(hw->mac_type == e1000_82542_rev2_0) {
-        if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
             e1000_pci_set_mwi(hw);
     }
 
@@ -718,9 +748,20 @@ e1000_init_hw(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_init_hw");
 
+    /* force full DMA clock frequency for 10/100 on ICH8 A0-B0 */
+    if (hw->mac_type == e1000_ich8lan) {
+        reg_data = E1000_READ_REG(hw, TARC0);
+        reg_data |= 0x30000000;
+        E1000_WRITE_REG(hw, TARC0, reg_data);
+
+        reg_data = E1000_READ_REG(hw, STATUS);
+        reg_data &= ~0x80000000;
+        E1000_WRITE_REG(hw, STATUS, reg_data);
+    }
+
     /* Initialize Identification LED */
     ret_val = e1000_id_led_init(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error Initializing Identification LED\n");
         return ret_val;
     }
@@ -738,7 +779,7 @@ e1000_init_hw(struct e1000_hw *hw)
     }
 
     /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
-    if(hw->mac_type == e1000_82542_rev2_0) {
+    if (hw->mac_type == e1000_82542_rev2_0) {
         DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
         e1000_pci_clear_mwi(hw);
         E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST);
@@ -752,11 +793,11 @@ e1000_init_hw(struct e1000_hw *hw)
     e1000_init_rx_addrs(hw);
 
     /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
-    if(hw->mac_type == e1000_82542_rev2_0) {
+    if (hw->mac_type == e1000_82542_rev2_0) {
         E1000_WRITE_REG(hw, RCTL, 0);
         E1000_WRITE_FLUSH(hw);
         msec_delay(1);
-        if(hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+        if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
             e1000_pci_set_mwi(hw);
     }
 
@@ -765,7 +806,7 @@ e1000_init_hw(struct e1000_hw *hw)
     mta_size = E1000_MC_TBL_SIZE;
     if (hw->mac_type == e1000_ich8lan)
         mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
-    for(i = 0; i < mta_size; i++) {
+    for (i = 0; i < mta_size; i++) {
         E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
         /* use write flush to prevent Memory Write Block (MWB) from
          * occuring when accessing our register space */
@@ -777,18 +818,18 @@ e1000_init_hw(struct e1000_hw *hw)
      * gives equal priority to transmits and receives. Valid only on
      * 82542 and 82543 silicon.
      */
-    if(hw->dma_fairness && hw->mac_type <= e1000_82543) {
+    if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
         ctrl = E1000_READ_REG(hw, CTRL);
         E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
     }
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82545_rev_3:
     case e1000_82546_rev_3:
         break;
     default:
         /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
-        if(hw->bus_type == e1000_bus_type_pcix) {
+        if (hw->bus_type == e1000_bus_type_pcix) {
             e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word);
             e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI,
                                &pcix_stat_hi_word);
@@ -796,9 +837,9 @@ e1000_init_hw(struct e1000_hw *hw)
                        PCIX_COMMAND_MMRBC_SHIFT;
             stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
                          PCIX_STATUS_HI_MMRBC_SHIFT;
-            if(stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+            if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
                 stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
-            if(cmd_mmrbc > stat_mmrbc) {
+            if (cmd_mmrbc > stat_mmrbc) {
                 pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK;
                 pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
                 e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER,
@@ -816,7 +857,7 @@ e1000_init_hw(struct e1000_hw *hw)
     ret_val = e1000_setup_link(hw);
 
     /* Set the transmit descriptor write-back policy */
-    if(hw->mac_type > e1000_82544) {
+    if (hw->mac_type > e1000_82544) {
         ctrl = E1000_READ_REG(hw, TXDCTL);
         ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
         switch (hw->mac_type) {
@@ -867,14 +908,13 @@ e1000_init_hw(struct e1000_hw *hw)
     case e1000_ich8lan:
         ctrl = E1000_READ_REG(hw, TXDCTL1);
         ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
-        if(hw->mac_type >= e1000_82571)
+        if (hw->mac_type >= e1000_82571)
             ctrl |= E1000_TXDCTL_COUNT_DESC;
         E1000_WRITE_REG(hw, TXDCTL1, ctrl);
         break;
     }
 
 
-
     if (hw->mac_type == e1000_82573) {
         uint32_t gcr = E1000_READ_REG(hw, GCR);
         gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
@@ -918,10 +958,10 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_adjust_serdes_amplitude");
 
-    if(hw->media_type != e1000_media_type_internal_serdes)
+    if (hw->media_type != e1000_media_type_internal_serdes)
         return E1000_SUCCESS;
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82545_rev_3:
     case e1000_82546_rev_3:
         break;
@@ -934,11 +974,11 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
         return ret_val;
     }
 
-    if(eeprom_data != EEPROM_RESERVED_WORD) {
+    if (eeprom_data != EEPROM_RESERVED_WORD) {
         /* Adjust SERDES output amplitude only. */
         eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
@@ -1006,10 +1046,10 @@ e1000_setup_link(struct e1000_hw *hw)
      * in case we get disconnected and then reconnected into a different
      * hub or switch with different Flow Control capabilities.
      */
-    if(hw->mac_type == e1000_82542_rev2_0)
+    if (hw->mac_type == e1000_82542_rev2_0)
         hw->fc &= (~e1000_fc_tx_pause);
 
-    if((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+    if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
         hw->fc &= (~e1000_fc_rx_pause);
 
     hw->original_fc = hw->fc;
@@ -1024,12 +1064,12 @@ e1000_setup_link(struct e1000_hw *hw)
      * or e1000_phy_setup() is called.
      */
     if (hw->mac_type == e1000_82543) {
         ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
                                     1, &eeprom_data);
         if (ret_val) {
             DEBUGOUT("EEPROM Read Error\n");
             return -E1000_ERR_EEPROM;
         }
         ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
                     SWDPIO__EXT_SHIFT);
         E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
@@ -1062,14 +1102,14 @@ e1000_setup_link(struct e1000_hw *hw)
1062 * ability to transmit pause frames in not enabled, then these 1102 * ability to transmit pause frames in not enabled, then these
1063 * registers will be set to 0. 1103 * registers will be set to 0.
1064 */ 1104 */
1065 if(!(hw->fc & e1000_fc_tx_pause)) { 1105 if (!(hw->fc & e1000_fc_tx_pause)) {
1066 E1000_WRITE_REG(hw, FCRTL, 0); 1106 E1000_WRITE_REG(hw, FCRTL, 0);
1067 E1000_WRITE_REG(hw, FCRTH, 0); 1107 E1000_WRITE_REG(hw, FCRTH, 0);
1068 } else { 1108 } else {
1069 /* We need to set up the Receive Threshold high and low water marks 1109 /* We need to set up the Receive Threshold high and low water marks
1070 * as well as (optionally) enabling the transmission of XON frames. 1110 * as well as (optionally) enabling the transmission of XON frames.
1071 */ 1111 */
1072 if(hw->fc_send_xon) { 1112 if (hw->fc_send_xon) {
1073 E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); 1113 E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
1074 E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water); 1114 E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
1075 } else { 1115 } else {
@@ -1116,11 +1156,11 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
      * the EEPROM.
      */
     ctrl = E1000_READ_REG(hw, CTRL);
-    if(hw->media_type == e1000_media_type_fiber)
+    if (hw->media_type == e1000_media_type_fiber)
         signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
 
     ret_val = e1000_adjust_serdes_amplitude(hw);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* Take the link out of reset */
@@ -1128,7 +1168,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
 
     /* Adjust VCO speed to improve BER performance */
     ret_val = e1000_set_vco_speed(hw);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     e1000_config_collision_dist(hw);
@@ -1199,15 +1239,15 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
      * less than 500 milliseconds even if the other end is doing it in SW).
      * For internal serdes, we just assume a signal is present, then poll.
      */
-    if(hw->media_type == e1000_media_type_internal_serdes ||
+    if (hw->media_type == e1000_media_type_internal_serdes ||
        (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) {
         DEBUGOUT("Looking for Link\n");
-        for(i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+        for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
             msec_delay(10);
             status = E1000_READ_REG(hw, STATUS);
-            if(status & E1000_STATUS_LU) break;
+            if (status & E1000_STATUS_LU) break;
         }
-        if(i == (LINK_UP_TIMEOUT / 10)) {
+        if (i == (LINK_UP_TIMEOUT / 10)) {
             DEBUGOUT("Never got a valid link from auto-neg!!!\n");
             hw->autoneg_failed = 1;
             /* AutoNeg failed to achieve a link, so we'll call
@@ -1216,7 +1256,7 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
              * non-autonegotiating link partners.
              */
             ret_val = e1000_check_for_link(hw);
-            if(ret_val) {
+            if (ret_val) {
                 DEBUGOUT("Error while checking for link\n");
                 return ret_val;
             }
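The fiber/serdes path above polls the device STATUS register every 10 ms until link-up or a timeout, then falls back to e1000_check_for_link(). The same pattern in isolation, as a hedged sketch; poll_status(), sleep_ms() and the bit/timeout values are assumed stand-ins, not driver symbols:

    /* Sketch of the link-poll loop above. */
    #include <stdbool.h>
    #include <stdint.h>

    #define LINK_UP_BIT    0x2u   /* hypothetical link-up status bit */
    #define TIMEOUT_MS     500
    #define POLL_INTERVAL  10

    extern uint32_t poll_status(void);  /* assumed: reads the STATUS register */
    extern void sleep_ms(unsigned ms);  /* assumed: millisecond delay */

    static bool wait_for_link(void)
    {
        for (int i = 0; i < TIMEOUT_MS / POLL_INTERVAL; i++) {
            sleep_ms(POLL_INTERVAL);
            if (poll_status() & LINK_UP_BIT)
                return true;            /* link came up within the window */
        }
        return false;                   /* caller falls back to a full check */
    }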
@@ -1250,7 +1290,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
      * the PHY speed and duplex configuration is. In addition, we need to
      * perform a hardware reset on the PHY to take it out of reset.
      */
-    if(hw->mac_type > e1000_82543) {
+    if (hw->mac_type > e1000_82543) {
         ctrl |= E1000_CTRL_SLU;
         ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
         E1000_WRITE_REG(hw, CTRL, ctrl);
@@ -1258,13 +1298,13 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
         ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
         E1000_WRITE_REG(hw, CTRL, ctrl);
         ret_val = e1000_phy_hw_reset(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
     /* Make sure we have a valid PHY */
     ret_val = e1000_detect_gig_phy(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error, did not detect valid phy.\n");
         return ret_val;
     }
@@ -1272,19 +1312,19 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
 
     /* Set PHY to class A mode (if necessary) */
     ret_val = e1000_set_phy_mode(hw);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
-    if((hw->mac_type == e1000_82545_rev_3) ||
+    if ((hw->mac_type == e1000_82545_rev_3) ||
        (hw->mac_type == e1000_82546_rev_3)) {
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
         phy_data |= 0x00000008;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
     }
 
-    if(hw->mac_type <= e1000_82543 ||
+    if (hw->mac_type <= e1000_82543 ||
        hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
        hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
         hw->phy_reset_disable = FALSE;
 
     return E1000_SUCCESS;
@@ -1314,7 +1354,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
         return ret_val;
     }
 
-    /* Wait 10ms for MAC to configure PHY from eeprom settings */
+    /* Wait 15ms for MAC to configure PHY from eeprom settings */
     msec_delay(15);
     if (hw->mac_type != e1000_ich8lan) {
         /* Configure activity LED after PHY reset */
@@ -1324,11 +1364,14 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
         E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
     }
 
-    /* disable lplu d3 during driver init */
-    ret_val = e1000_set_d3_lplu_state(hw, FALSE);
-    if (ret_val) {
-        DEBUGOUT("Error Disabling LPLU D3\n");
-        return ret_val;
+    /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
+    if (hw->phy_type == e1000_phy_igp) {
+        /* disable lplu d3 during driver init */
+        ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+        if (ret_val) {
+            DEBUGOUT("Error Disabling LPLU D3\n");
+            return ret_val;
+        }
     }
 
     /* disable lplu d0 during driver init */
@@ -1366,45 +1409,45 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
         }
     }
     ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* set auto-master slave resolution settings */
-    if(hw->autoneg) {
+    if (hw->autoneg) {
         e1000_ms_type phy_ms_setting = hw->master_slave;
 
-        if(hw->ffe_config_state == e1000_ffe_config_active)
+        if (hw->ffe_config_state == e1000_ffe_config_active)
             hw->ffe_config_state = e1000_ffe_config_enabled;
 
-        if(hw->dsp_config_state == e1000_dsp_config_activated)
+        if (hw->dsp_config_state == e1000_dsp_config_activated)
             hw->dsp_config_state = e1000_dsp_config_enabled;
 
        /* when autonegotiation advertisement is only 1000Mbps then we
        * should disable SmartSpeed and enable Auto MasterSlave
        * resolution as hardware default. */
-        if(hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+        if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
            /* Disable SmartSpeed */
-            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
-            if(ret_val)
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
                 return ret_val;
             phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
-            ret_val = e1000_write_phy_reg(hw,
-                                          IGP01E1000_PHY_PORT_CONFIG,
-                                          phy_data);
-            if(ret_val)
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
                 return ret_val;
             /* Set auto Master/Slave resolution process */
             ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             phy_data &= ~CR_1000T_MS_ENABLE;
             ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         }
 
         ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* load defaults for future use */
@@ -1428,7 +1471,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
             break;
         }
         ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
@@ -1449,12 +1492,12 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_copper_link_ggp_setup");
 
-    if(!hw->phy_reset_disable) {
+    if (!hw->phy_reset_disable) {
 
         /* Enable CRS on TX for half-duplex operation. */
         ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -1463,7 +1506,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
 
         ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
                                       phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* Options:
@@ -1474,7 +1517,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
          *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
          */
         ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
@@ -1499,11 +1542,11 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
          *   1 - Enabled
          */
         phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
-        if(hw->disable_polarity_correction == 1)
+        if (hw->disable_polarity_correction == 1)
             phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
         ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
 
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* SW Reset the PHY so all changes take effect */
@@ -1559,9 +1602,9 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
             return ret_val;
 
         phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
-
         ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
                                       phy_data);
+
         if (ret_val)
             return ret_val;
     }
@@ -1596,12 +1639,12 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_copper_link_mgp_setup");
 
-    if(hw->phy_reset_disable)
+    if (hw->phy_reset_disable)
         return E1000_SUCCESS;
 
     /* Enable CRS on TX. This must be set for half-duplex operation. */
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
@@ -1638,7 +1681,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
      *   1 - Enabled
      */
     phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
-    if(hw->disable_polarity_correction == 1)
+    if (hw->disable_polarity_correction == 1)
         phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
     if (ret_val)
@@ -1678,7 +1721,7 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
 
     /* SW Reset the PHY so all changes take effect */
     ret_val = e1000_phy_reset(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error Resetting the PHY\n");
         return ret_val;
     }
@@ -1708,7 +1751,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
     /* If autoneg_advertised is zero, we assume it was not defaulted
      * by the calling code so we set to advertise full capability.
      */
-    if(hw->autoneg_advertised == 0)
+    if (hw->autoneg_advertised == 0)
         hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
     /* IFE phy only supports 10/100 */
@@ -1717,7 +1760,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
 
     DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
     ret_val = e1000_phy_setup_autoneg(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error Setting up Auto-Negotiation\n");
         return ret_val;
     }
@@ -1727,20 +1770,20 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
      * the Auto Neg Restart bit in the PHY control register.
      */
     ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
     ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* Does the user want to wait for Auto-Neg to complete here, or
      * check at a later time (for example, callback routine).
      */
-    if(hw->wait_autoneg_complete) {
+    if (hw->wait_autoneg_complete) {
         ret_val = e1000_wait_autoneg(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error while waiting for autoneg to complete\n");
             return ret_val;
         }
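The restart step above sets two bits of the MII control register (register 0) in one write. A self-contained sketch of the same operation, using the standard 802.3 bit positions; mii_read()/mii_write() are assumed accessors, not driver functions:

    /* Sketch of the autoneg enable-and-restart write above. */
    #include <stdint.h>

    #define MII_CTRL_REG       0x00    /* 802.3 MII control register */
    #define MII_CR_AUTONEG_EN  0x1000  /* bit 12: enable auto-negotiation */
    #define MII_CR_RESTART_AN  0x0200  /* bit 9: restart auto-negotiation */

    extern int mii_read(uint8_t reg, uint16_t *val);   /* assumed accessor */
    extern int mii_write(uint8_t reg, uint16_t val);   /* assumed accessor */

    static int restart_autoneg(void)
    {
        uint16_t ctrl;
        int err = mii_read(MII_CTRL_REG, &ctrl);
        if (err)
            return err;
        /* Enabling and restarting in one write kicks off a fresh negotiation */
        return mii_write(MII_CTRL_REG,
                         ctrl | MII_CR_AUTONEG_EN | MII_CR_RESTART_AN);
    }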
@@ -1751,7 +1794,6 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
     return E1000_SUCCESS;
 }
 
-
 /******************************************************************************
 * Config the MAC and the PHY after link is up.
 * 1) Set up the MAC to the current PHY speed/duplex
@@ -1770,25 +1812,25 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
     int32_t ret_val;
     DEBUGFUNC("e1000_copper_link_postconfig");
 
-    if(hw->mac_type >= e1000_82544) {
+    if (hw->mac_type >= e1000_82544) {
         e1000_config_collision_dist(hw);
     } else {
         ret_val = e1000_config_mac_to_phy(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error configuring MAC to PHY settings\n");
             return ret_val;
         }
     }
     ret_val = e1000_config_fc_after_link_up(hw);
-    if(ret_val) {
+    if (ret_val) {
         DEBUGOUT("Error Configuring Flow Control\n");
         return ret_val;
     }
 
     /* Config DSP to improve Giga link quality */
-    if(hw->phy_type == e1000_phy_igp) {
+    if (hw->phy_type == e1000_phy_igp) {
         ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error Configuring DSP after link up\n");
             return ret_val;
         }
@@ -1834,7 +1876,7 @@ e1000_setup_copper_link(struct e1000_hw *hw)
 
     /* Check if it is a valid PHY and set PHY mode if necessary. */
     ret_val = e1000_copper_link_preconfig(hw);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     switch (hw->mac_type) {
@@ -1855,30 +1897,30 @@ e1000_setup_copper_link(struct e1000_hw *hw)
         hw->phy_type == e1000_phy_igp_3 ||
         hw->phy_type == e1000_phy_igp_2) {
         ret_val = e1000_copper_link_igp_setup(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     } else if (hw->phy_type == e1000_phy_m88) {
         ret_val = e1000_copper_link_mgp_setup(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     } else if (hw->phy_type == e1000_phy_gg82563) {
         ret_val = e1000_copper_link_ggp_setup(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
-    if(hw->autoneg) {
+    if (hw->autoneg) {
         /* Setup autoneg and flow control advertisement
          * and perform autonegotiation */
         ret_val = e1000_copper_link_autoneg(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     } else {
         /* PHY will be set to 10H, 10F, 100H, or 100F
         * depending on value from forced_speed_duplex. */
        DEBUGOUT("Forcing speed and duplex\n");
        ret_val = e1000_phy_force_speed_duplex(hw);
-       if(ret_val) {
+       if (ret_val) {
            DEBUGOUT("Error Forcing Speed and Duplex\n");
            return ret_val;
        }
@@ -1887,18 +1929,18 @@ e1000_setup_copper_link(struct e1000_hw *hw)
     /* Check link status. Wait up to 100 microseconds for link to become
      * valid.
      */
-    for(i = 0; i < 10; i++) {
+    for (i = 0; i < 10; i++) {
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if(phy_data & MII_SR_LINK_STATUS) {
+        if (phy_data & MII_SR_LINK_STATUS) {
             /* Config the MAC and PHY after link is up */
             ret_val = e1000_copper_link_postconfig(hw);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             DEBUGOUT("Valid link established!!!\n");
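Note the back-to-back reads of PHY_STATUS in the loop above: the MII status register's link bit is latched low ("sticky"), so the first read returns the stale latched value and is discarded, while the second reflects the current link state. A sketch of that pattern, with mii_read() as an assumed accessor:

    /* Sketch of the sticky-bit double read used above. */
    #include <stdbool.h>
    #include <stdint.h>

    #define MII_STATUS_REG  0x01    /* 802.3 MII status register */
    #define MII_SR_LINK_UP  0x0004  /* bit 2: link status (latched low) */

    extern int mii_read(uint8_t reg, uint16_t *val);  /* assumed accessor */

    static int link_is_up(bool *up)
    {
        uint16_t status;
        int err;

        if ((err = mii_read(MII_STATUS_REG, &status)))  /* clears latch */
            return err;
        if ((err = mii_read(MII_STATUS_REG, &status)))  /* current state */
            return err;
        *up = (status & MII_SR_LINK_UP) != 0;
        return 0;
    }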
@@ -2000,7 +2042,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
 
     /* Read the MII Auto-Neg Advertisement Register (Address 4). */
     ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     if (hw->phy_type != e1000_phy_ife) {
@@ -2028,36 +2070,36 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
     DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
 
     /* Do we want to advertise 10 Mb Half Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_10_HALF) {
+    if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
         DEBUGOUT("Advertise 10mb Half duplex\n");
         mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
     }
 
     /* Do we want to advertise 10 Mb Full Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_10_FULL) {
+    if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
         DEBUGOUT("Advertise 10mb Full duplex\n");
         mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
     }
 
     /* Do we want to advertise 100 Mb Half Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_100_HALF) {
+    if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
         DEBUGOUT("Advertise 100mb Half duplex\n");
         mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
     }
 
     /* Do we want to advertise 100 Mb Full Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_100_FULL) {
+    if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
         DEBUGOUT("Advertise 100mb Full duplex\n");
         mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
     }
 
     /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
-    if(hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+    if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
         DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
     }
 
     /* Do we want to advertise 1000 Mb Full Duplex? */
-    if(hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+    if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
         DEBUGOUT("Advertise 1000mb Full duplex\n");
         mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
         if (hw->phy_type == e1000_phy_ife) {
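The block above maps each requested speed/duplex to a capability bit in the MII advertisement register (register 4). Condensed into a standalone sketch using the standard 802.3 bit positions; the ADV_* request flags are hypothetical stand-ins for the driver's ADVERTISE_* masks:

    /* Sketch of the advertisement-word construction above. */
    #include <stdint.h>

    #define ADV_10_HALF   0x01    /* request flags (hypothetical) */
    #define ADV_10_FULL   0x02
    #define ADV_100_HALF  0x04
    #define ADV_100_FULL  0x08

    #define NWAY_10T_HD   0x0020  /* 802.3 register-4 capability bits */
    #define NWAY_10T_FD   0x0040
    #define NWAY_100TX_HD 0x0080
    #define NWAY_100TX_FD 0x0100

    static uint16_t build_mii_advertisement(uint16_t requested)
    {
        uint16_t adv = 0;
        if (requested & ADV_10_HALF)  adv |= NWAY_10T_HD;
        if (requested & ADV_10_FULL)  adv |= NWAY_10T_FD;
        if (requested & ADV_100_HALF) adv |= NWAY_100TX_HD;
        if (requested & ADV_100_FULL) adv |= NWAY_100TX_FD;
        /* 1000 Mb capabilities live in a separate register (PHY_1000T_CTRL),
         * and 1000 Mb half duplex is never advertised, as noted above. */
        return adv;
    }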
@@ -2119,7 +2161,7 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
     }
 
     ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
@@ -2167,7 +2209,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
 
     /* Read the MII Control Register. */
     ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     /* We need to disable autoneg in order to force link and duplex. */
@@ -2175,8 +2217,8 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
     mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
 
     /* Are we forcing Full or Half Duplex? */
-    if(hw->forced_speed_duplex == e1000_100_full ||
-       hw->forced_speed_duplex == e1000_10_full) {
+    if (hw->forced_speed_duplex == e1000_100_full ||
+        hw->forced_speed_duplex == e1000_10_full) {
         /* We want to force full duplex so we SET the full duplex bits in the
          * Device and MII Control Registers.
          */
@@ -2193,7 +2235,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
     }
 
     /* Are we forcing 100Mbps??? */
-    if(hw->forced_speed_duplex == e1000_100_full ||
+    if (hw->forced_speed_duplex == e1000_100_full ||
        hw->forced_speed_duplex == e1000_100_half) {
         /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
         ctrl |= E1000_CTRL_SPD_100;
@@ -2216,7 +2258,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
     if ((hw->phy_type == e1000_phy_m88) ||
         (hw->phy_type == e1000_phy_gg82563)) {
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
@@ -2224,7 +2266,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
          */
         phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
@@ -2248,20 +2290,20 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
          * forced whenever speed or duplex are forced.
          */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
            return ret_val;
 
         phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
         phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
 
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
     /* Write back the modified PHY MII control register. */
     ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     udelay(1);
@@ -2273,50 +2315,50 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
      * only if the user has set wait_autoneg_complete to 1, which is
      * the default.
      */
-    if(hw->wait_autoneg_complete) {
+    if (hw->wait_autoneg_complete) {
         /* We will wait for autoneg to complete. */
         DEBUGOUT("Waiting for forced speed/duplex link.\n");
         mii_status_reg = 0;
 
         /* We will wait for autoneg to complete or 4.5 seconds to expire. */
-        for(i = PHY_FORCE_TIME; i > 0; i--) {
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
             /* Read the MII Status Register and wait for Auto-Neg Complete bit
              * to be set.
              */
             ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
-            if(mii_status_reg & MII_SR_LINK_STATUS) break;
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
             msec_delay(100);
         }
-        if((i == 0) &&
+        if ((i == 0) &&
            ((hw->phy_type == e1000_phy_m88) ||
             (hw->phy_type == e1000_phy_gg82563))) {
             /* We didn't get link. Reset the DSP and wait again for link. */
             ret_val = e1000_phy_reset_dsp(hw);
-            if(ret_val) {
+            if (ret_val) {
                 DEBUGOUT("Error Resetting PHY DSP\n");
                 return ret_val;
             }
         }
         /* This loop will early-out if the link condition has been met. */
-        for(i = PHY_FORCE_TIME; i > 0; i--) {
-            if(mii_status_reg & MII_SR_LINK_STATUS) break;
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
             msec_delay(100);
             /* Read the MII Status Register and wait for Auto-Neg Complete bit
              * to be set.
              */
             ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         }
     }
@@ -2327,32 +2369,31 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
          * defaults back to a 2.5MHz clock when the PHY is reset.
          */
         ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data |= M88E1000_EPSCR_TX_CLK_25;
         ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* In addition, because of the s/w reset above, we need to enable CRS on
          * TX. This must be set for both full and half duplex operation.
          */
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
            return ret_val;
 
-        if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
-           (!hw->autoneg) &&
-           (hw->forced_speed_duplex == e1000_10_full ||
-            hw->forced_speed_duplex == e1000_10_half)) {
+        if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+            (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+             hw->forced_speed_duplex == e1000_10_half)) {
             ret_val = e1000_polarity_reversal_workaround(hw);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         }
     } else if (hw->phy_type == e1000_phy_gg82563) {
@@ -2443,10 +2484,10 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
      * registers depending on negotiated values.
      */
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
-    if(phy_data & M88E1000_PSSR_DPLX)
+    if (phy_data & M88E1000_PSSR_DPLX)
         ctrl |= E1000_CTRL_FD;
     else
         ctrl &= ~E1000_CTRL_FD;
@@ -2456,9 +2497,9 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
     /* Set up speed in the Device Control register depending on
      * negotiated values.
      */
-    if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
         ctrl |= E1000_CTRL_SPD_1000;
-    else if((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+    else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
         ctrl |= E1000_CTRL_SPD_100;
 
     /* Write the configured values back to the Device Control Reg. */
@@ -2526,7 +2567,7 @@ e1000_force_mac_fc(struct e1000_hw *hw)
     }
 
     /* Disable TX Flow Control for 82542 (rev 2.0) */
-    if(hw->mac_type == e1000_82542_rev2_0)
+    if (hw->mac_type == e1000_82542_rev2_0)
         ctrl &= (~E1000_CTRL_TFCE);
 
     E1000_WRITE_REG(hw, CTRL, ctrl);
@@ -2560,11 +2601,12 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
      * so we had to force link. In this case, we need to force the
      * configuration of the MAC to match the "fc" parameter.
      */
-    if(((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
-       ((hw->media_type == e1000_media_type_internal_serdes) && (hw->autoneg_failed)) ||
-       ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+    if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_internal_serdes) &&
+         (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
         ret_val = e1000_force_mac_fc(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error forcing flow control settings\n");
             return ret_val;
         }
@@ -2575,19 +2617,19 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
      * has completed, and if so, how the PHY and link partner has
      * flow control configured.
      */
-    if((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+    if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
         /* Read the MII Status Register and check to see if AutoNeg
          * has completed. We read this twice because this reg has
          * some "sticky" (latched) bits.
          */
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if(mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+        if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
             /* The AutoNeg process has completed, so we now need to
              * read both the Auto Negotiation Advertisement Register
              * (Address 4) and the Auto-Negotiation Base Page Ability
@@ -2596,11 +2638,11 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              */
             ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
                                          &mii_nway_adv_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
                                          &mii_nway_lp_ability_reg);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             /* Two bits in the Auto Negotiation Advertisement Register
@@ -2637,15 +2679,15 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              *   1   |   DC    |   1   |   DC    | e1000_fc_full
              *
              */
-            if((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-               (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+            if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
                 /* Now we need to check if the user selected RX ONLY
                  * of pause frames. In this case, we had to advertise
                  * FULL flow control because we could not advertise RX
                  * ONLY. Hence, we must now check to see if we need to
                  * turn OFF the TRANSMISSION of PAUSE frames.
                  */
-                if(hw->original_fc == e1000_fc_full) {
+                if (hw->original_fc == e1000_fc_full) {
                     hw->fc = e1000_fc_full;
                     DEBUGOUT("Flow Control = FULL.\n");
                 } else {
@@ -2661,10 +2703,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
              *
              */
-            else if(!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                    (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
-                    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+            else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                 hw->fc = e1000_fc_tx_pause;
                 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
             }
@@ -2676,10 +2718,10 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
              *
              */
-            else if((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
-                    (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
-                    !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+            else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                 hw->fc = e1000_fc_rx_pause;
                 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
            }
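The chained conditions above implement the PAUSE/ASM_DIR truth table spelled out in the comments. The same resolution logic in a compact standalone form, using the standard 802.3 advertisement bit positions; the enum names are stand-ins for the driver's e1000_fc_* values, and demotion of the "full" result based on the user's original setting is left to the caller, as in the driver:

    /* Sketch of the 802.3 pause-resolution table above. */
    #include <stdint.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    #define AR_PAUSE    0x0400  /* 802.3 advertisement: PAUSE */
    #define AR_ASM_DIR  0x0800  /* 802.3 advertisement: asymmetric pause */

    static enum fc_mode resolve_fc(uint16_t local, uint16_t partner)
    {
        if ((local & AR_PAUSE) && (partner & AR_PAUSE))
            return FC_FULL;      /* both symmetric (caller may demote) */
        if (!(local & AR_PAUSE) && (local & AR_ASM_DIR) &&
            (partner & AR_PAUSE) && (partner & AR_ASM_DIR))
            return FC_TX_PAUSE;  /* we send pause frames only */
        if ((local & AR_PAUSE) && (local & AR_ASM_DIR) &&
            !(partner & AR_PAUSE) && (partner & AR_ASM_DIR))
            return FC_RX_PAUSE;  /* we honor received pause frames only */
        return FC_NONE;
    }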
@@ -2703,9 +2745,9 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
              * be asked to delay transmission of packets than asking
              * our link partner to pause transmission of frames.
              */
-            else if((hw->original_fc == e1000_fc_none ||
-                     hw->original_fc == e1000_fc_tx_pause) ||
-                     hw->fc_strict_ieee) {
+            else if ((hw->original_fc == e1000_fc_none ||
+                      hw->original_fc == e1000_fc_tx_pause) ||
+                      hw->fc_strict_ieee) {
                 hw->fc = e1000_fc_none;
                 DEBUGOUT("Flow Control = NONE.\n");
             } else {
@@ -2718,19 +2760,19 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
          * enabled per IEEE 802.3 spec.
          */
         ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error getting link speed and duplex\n");
             return ret_val;
         }
 
-        if(duplex == HALF_DUPLEX)
+        if (duplex == HALF_DUPLEX)
             hw->fc = e1000_fc_none;
 
         /* Now we call a subroutine to actually force the MAC
          * controller to use the correct flow control settings.
          */
         ret_val = e1000_force_mac_fc(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error forcing flow control settings\n");
             return ret_val;
         }
@@ -2769,13 +2811,13 @@ e1000_check_for_link(struct e1000_hw *hw)
      * set when the optics detect a signal. On older adapters, it will be
      * cleared when there is a signal. This applies to fiber media only.
      */
-    if((hw->media_type == e1000_media_type_fiber) ||
+    if ((hw->media_type == e1000_media_type_fiber) ||
        (hw->media_type == e1000_media_type_internal_serdes)) {
         rxcw = E1000_READ_REG(hw, RXCW);
 
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
-            if(status & E1000_STATUS_LU)
+            if (status & E1000_STATUS_LU)
                 hw->get_link_status = FALSE;
         }
     }
@@ -2786,20 +2828,20 @@ e1000_check_for_link(struct e1000_hw *hw)
      * receive a Link Status Change interrupt or we have Rx Sequence
      * Errors.
      */
-    if((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+    if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
         /* First we want to see if the MII Status Register reports
          * link. If so, then we want to get the current speed/duplex
         * of the PHY.
         * Read the register twice since the link bit is sticky.
         */
        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-       if(ret_val)
+       if (ret_val)
            return ret_val;
        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-       if(ret_val)
+       if (ret_val)
            return ret_val;
 
-       if(phy_data & MII_SR_LINK_STATUS) {
+       if (phy_data & MII_SR_LINK_STATUS) {
            hw->get_link_status = FALSE;
            /* Check if there was DownShift, must be checked immediately after
             * link-up */
@@ -2813,10 +2855,10 @@ e1000_check_for_link(struct e1000_hw *hw)
             * happen due to the execution of this workaround.
             */
 
-           if((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
-              (!hw->autoneg) &&
-              (hw->forced_speed_duplex == e1000_10_full ||
-               hw->forced_speed_duplex == e1000_10_half)) {
+           if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+               (!hw->autoneg) &&
+               (hw->forced_speed_duplex == e1000_10_full ||
+                hw->forced_speed_duplex == e1000_10_half)) {
                E1000_WRITE_REG(hw, IMC, 0xffffffff);
                ret_val = e1000_polarity_reversal_workaround(hw);
                icr = E1000_READ_REG(hw, ICR);
@@ -2833,7 +2875,7 @@ e1000_check_for_link(struct e1000_hw *hw)
         /* If we are forcing speed/duplex, then we simply return since
          * we have already determined whether we have link or not.
          */
-        if(!hw->autoneg) return -E1000_ERR_CONFIG;
+        if (!hw->autoneg) return -E1000_ERR_CONFIG;
 
         /* optimize the dsp settings for the igp phy */
         e1000_config_dsp_after_link_change(hw, TRUE);
@@ -2846,11 +2888,11 @@ e1000_check_for_link(struct e1000_hw *hw)
          * speed/duplex on the MAC to the current PHY speed/duplex
          * settings.
          */
-        if(hw->mac_type >= e1000_82544)
+        if (hw->mac_type >= e1000_82544)
             e1000_config_collision_dist(hw);
         else {
             ret_val = e1000_config_mac_to_phy(hw);
-            if(ret_val) {
+            if (ret_val) {
                 DEBUGOUT("Error configuring MAC to PHY settings\n");
                 return ret_val;
             }
@@ -2861,7 +2903,7 @@ e1000_check_for_link(struct e1000_hw *hw)
          * have had to re-autoneg with a different link partner.
          */
         ret_val = e1000_config_fc_after_link_up(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error configuring flow control\n");
             return ret_val;
         }
@@ -2873,7 +2915,7 @@ e1000_check_for_link(struct e1000_hw *hw)
          * at gigabit speed, then TBI compatibility is not needed. If we are
          * at gigabit speed, we turn on TBI compatibility.
          */
-        if(hw->tbi_compatibility_en) {
+        if (hw->tbi_compatibility_en) {
             uint16_t speed, duplex;
             ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
             if (ret_val) {
@@ -2884,7 +2926,7 @@ e1000_check_for_link(struct e1000_hw *hw)
             /* If link speed is not set to gigabit speed, we do not need
              * to enable TBI compatibility.
              */
-            if(hw->tbi_compatibility_on) {
+            if (hw->tbi_compatibility_on) {
                 /* If we previously were in the mode, turn it off. */
                 rctl = E1000_READ_REG(hw, RCTL);
                 rctl &= ~E1000_RCTL_SBP;
@@ -2897,7 +2939,7 @@ e1000_check_for_link(struct e1000_hw *hw)
              * packets. Some frames have an additional byte on the end and
              * will look like CRC errors to the hardware.
              */
-            if(!hw->tbi_compatibility_on) {
+            if (!hw->tbi_compatibility_on) {
                 hw->tbi_compatibility_on = TRUE;
                 rctl = E1000_READ_REG(hw, RCTL);
                 rctl |= E1000_RCTL_SBP;
@@ -2913,12 +2955,12 @@ e1000_check_for_link(struct e1000_hw *hw)
2913 * auto-negotiation time to complete, in case the cable was just plugged 2955 * auto-negotiation time to complete, in case the cable was just plugged
2914 * in. The autoneg_failed flag does this. 2956 * in. The autoneg_failed flag does this.
2915 */ 2957 */
2916 else if((((hw->media_type == e1000_media_type_fiber) && 2958 else if ((((hw->media_type == e1000_media_type_fiber) &&
      ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
      (hw->media_type == e1000_media_type_internal_serdes)) &&
      (!(status & E1000_STATUS_LU)) &&
      (!(rxcw & E1000_RXCW_C))) {
-        if(hw->autoneg_failed == 0) {
+        if (hw->autoneg_failed == 0) {
             hw->autoneg_failed = 1;
             return 0;
         }
@@ -2934,7 +2976,7 @@ e1000_check_for_link(struct e1000_hw *hw)
 
         /* Configure Flow Control after forcing link up. */
         ret_val = e1000_config_fc_after_link_up(hw);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error configuring flow control\n");
             return ret_val;
         }
@@ -2944,9 +2986,9 @@ e1000_check_for_link(struct e1000_hw *hw)
      * Device Control register in an attempt to auto-negotiate with our link
      * partner.
      */
-    else if(((hw->media_type == e1000_media_type_fiber) ||
+    else if (((hw->media_type == e1000_media_type_fiber) ||
              (hw->media_type == e1000_media_type_internal_serdes)) &&
             (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
         DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
         E1000_WRITE_REG(hw, TXCW, hw->txcw);
         E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -2956,12 +2998,12 @@ e1000_check_for_link(struct e1000_hw *hw)
     /* If we force link for non-auto-negotiation switch, check link status
      * based on MAC synchronization for internal serdes media type.
      */
-    else if((hw->media_type == e1000_media_type_internal_serdes) &&
+    else if ((hw->media_type == e1000_media_type_internal_serdes) &&
             !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
         /* SYNCH bit and IV bit are sticky. */
         udelay(10);
-        if(E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
-            if(!(rxcw & E1000_RXCW_IV)) {
+        if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
+            if (!(rxcw & E1000_RXCW_IV)) {
                 hw->serdes_link_down = FALSE;
                 DEBUGOUT("SERDES: Link is up.\n");
             }
@@ -2970,8 +3012,8 @@ e1000_check_for_link(struct e1000_hw *hw)
             DEBUGOUT("SERDES: Link is down.\n");
         }
     }
-    if((hw->media_type == e1000_media_type_internal_serdes) &&
+    if ((hw->media_type == e1000_media_type_internal_serdes) &&
        (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
         hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS));
     }
     return E1000_SUCCESS;
@@ -2995,12 +3037,12 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
 
     DEBUGFUNC("e1000_get_speed_and_duplex");
 
-    if(hw->mac_type >= e1000_82543) {
+    if (hw->mac_type >= e1000_82543) {
         status = E1000_READ_REG(hw, STATUS);
-        if(status & E1000_STATUS_SPEED_1000) {
+        if (status & E1000_STATUS_SPEED_1000) {
             *speed = SPEED_1000;
             DEBUGOUT("1000 Mbs, ");
-        } else if(status & E1000_STATUS_SPEED_100) {
+        } else if (status & E1000_STATUS_SPEED_100) {
             *speed = SPEED_100;
             DEBUGOUT("100 Mbs, ");
         } else {
@@ -3008,7 +3050,7 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
             DEBUGOUT("10 Mbs, ");
         }
 
-        if(status & E1000_STATUS_FD) {
+        if (status & E1000_STATUS_FD) {
             *duplex = FULL_DUPLEX;
             DEBUGOUT("Full Duplex\n");
         } else {
@@ -3025,18 +3067,18 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
      * if it is operating at half duplex.  Here we set the duplex settings to
      * match the duplex in the link partner's capabilities.
      */
-    if(hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+    if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
         ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
-        if(!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+        if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
             *duplex = HALF_DUPLEX;
         else {
             ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
-            if((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+            if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
                (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
                 *duplex = HALF_DUPLEX;
         }
@@ -3077,17 +3119,17 @@ e1000_wait_autoneg(struct e1000_hw *hw)
     DEBUGOUT("Waiting for Auto-Neg to complete.\n");
 
     /* We will wait for autoneg to complete or 4.5 seconds to expire. */
-    for(i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+    for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
         /* Read the MII Status Register and wait for Auto-Neg
          * Complete bit to be set.
          */
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
-        if(phy_data & MII_SR_AUTONEG_COMPLETE) {
+        if (phy_data & MII_SR_AUTONEG_COMPLETE) {
             return E1000_SUCCESS;
         }
         msec_delay(100);
@@ -3160,14 +3202,16 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
     /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
     ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
 
-    while(mask) {
+    while (mask) {
         /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
          * then raising and lowering the Management Data Clock. A "0" is
          * shifted out to the PHY by setting the MDIO bit to "0" and then
          * raising and lowering the clock.
          */
-        if(data & mask) ctrl |= E1000_CTRL_MDIO;
-        else ctrl &= ~E1000_CTRL_MDIO;
+        if (data & mask)
+            ctrl |= E1000_CTRL_MDIO;
+        else
+            ctrl &= ~E1000_CTRL_MDIO;
 
         E1000_WRITE_REG(hw, CTRL, ctrl);
         E1000_WRITE_FLUSH(hw);
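The comment in the hunk above describes classic MDIO bit-banging: each bit is presented on the MDIO line and clocked into the PHY by toggling MDC. A minimal standalone sketch of that MSB-first pattern follows; the two helpers are hypothetical stand-ins for the driver's E1000_CTRL register writes and flushes, not real e1000 functions.

    #include <stdint.h>

    static void mdio_set_data(int bit)  { (void)bit; /* drive the MDIO pin high or low */ }
    static void mdio_pulse_clock(void)  { /* raise MDC, delay, then lower MDC */ }

    /* Shift 'count' bits of 'data' out to the PHY, most significant bit first. */
    static void shift_out_bits(uint32_t data, uint16_t count)
    {
        uint32_t mask = 1u << (count - 1);

        while (mask) {
            mdio_set_data((data & mask) != 0);  /* set MDIO to the bit value... */
            mdio_pulse_clock();                 /* ...then raise and lower the clock */
            mask >>= 1;
        }
    }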
@@ -3218,12 +3262,13 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
     e1000_raise_mdi_clk(hw, &ctrl);
     e1000_lower_mdi_clk(hw, &ctrl);
 
-    for(data = 0, i = 0; i < 16; i++) {
+    for (data = 0, i = 0; i < 16; i++) {
         data = data << 1;
         e1000_raise_mdi_clk(hw, &ctrl);
         ctrl = E1000_READ_REG(hw, CTRL);
         /* Check to see if we shifted in a "1". */
-        if(ctrl & E1000_CTRL_MDIO) data |= 1;
+        if (ctrl & E1000_CTRL_MDIO)
+            data |= 1;
         e1000_lower_mdi_clk(hw, &ctrl);
     }
 
@@ -3233,7 +3278,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
     return data;
 }
 
-int32_t
+static int32_t
 e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
 {
     uint32_t swfw_sync = 0;
@@ -3249,7 +3294,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
     if (!hw->swfw_sync_present)
         return e1000_get_hw_eeprom_semaphore(hw);
 
-    while(timeout) {
+    while (timeout) {
         if (e1000_get_hw_eeprom_semaphore(hw))
             return -E1000_ERR_SWFW_SYNC;
 
@@ -3277,7 +3322,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
     return E1000_SUCCESS;
 }
 
-void
+static void
 e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
 {
     uint32_t swfw_sync;
@@ -3338,7 +3383,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
         (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
                                          (uint16_t)reg_addr);
-        if(ret_val) {
+        if (ret_val) {
             e1000_swfw_sync_release(hw, swfw);
             return ret_val;
         }
@@ -3383,12 +3428,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw,
 
     DEBUGFUNC("e1000_read_phy_reg_ex");
 
-    if(reg_addr > MAX_PHY_REG_ADDRESS) {
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
         DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
         return -E1000_ERR_PARAM;
     }
 
-    if(hw->mac_type > e1000_82543) {
+    if (hw->mac_type > e1000_82543) {
         /* Set up Op-code, Phy Address, and register address in the MDI
          * Control register.  The MAC will take care of interfacing with the
          * PHY to retrieve the desired data.
@@ -3400,16 +3445,16 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw,
         E1000_WRITE_REG(hw, MDIC, mdic);
 
         /* Poll the ready bit to see if the MDI read completed */
-        for(i = 0; i < 64; i++) {
+        for (i = 0; i < 64; i++) {
             udelay(50);
             mdic = E1000_READ_REG(hw, MDIC);
-            if(mdic & E1000_MDIC_READY) break;
+            if (mdic & E1000_MDIC_READY) break;
         }
-        if(!(mdic & E1000_MDIC_READY)) {
+        if (!(mdic & E1000_MDIC_READY)) {
             DEBUGOUT("MDI Read did not complete\n");
             return -E1000_ERR_PHY;
         }
-        if(mdic & E1000_MDIC_ERROR) {
+        if (mdic & E1000_MDIC_ERROR) {
             DEBUGOUT("MDI Error\n");
             return -E1000_ERR_PHY;
         }
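Worth noting for anyone tuning this path: the ready-bit poll above allows 64 iterations of udelay(50), i.e. a worst-case wait of 64 x 50 us = 3.2 ms before the MDI read is declared failed.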
@@ -3478,7 +3523,7 @@ e1000_write_phy_reg(struct e1000_hw *hw,
         (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
                                          (uint16_t)reg_addr);
-        if(ret_val) {
+        if (ret_val) {
             e1000_swfw_sync_release(hw, swfw);
             return ret_val;
         }
@@ -3523,12 +3568,12 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
 
     DEBUGFUNC("e1000_write_phy_reg_ex");
 
-    if(reg_addr > MAX_PHY_REG_ADDRESS) {
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
         DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
         return -E1000_ERR_PARAM;
     }
 
-    if(hw->mac_type > e1000_82543) {
+    if (hw->mac_type > e1000_82543) {
         /* Set up Op-code, Phy Address, register address, and data intended
          * for the PHY register in the MDI Control register.  The MAC will take
          * care of interfacing with the PHY to send the desired data.
@@ -3541,12 +3586,12 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
         E1000_WRITE_REG(hw, MDIC, mdic);
 
         /* Poll the ready bit to see if the MDI read completed */
-        for(i = 0; i < 640; i++) {
+        for (i = 0; i < 641; i++) {
             udelay(5);
             mdic = E1000_READ_REG(hw, MDIC);
-            if(mdic & E1000_MDIC_READY) break;
+            if (mdic & E1000_MDIC_READY) break;
         }
-        if(!(mdic & E1000_MDIC_READY)) {
+        if (!(mdic & E1000_MDIC_READY)) {
             DEBUGOUT("MDI Write did not complete\n");
             return -E1000_ERR_PHY;
         }
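The loop bound change here (640 to 641 iterations of udelay(5)) keeps the same nominal budget of 640 x 5 us = 3.2 ms; the extra iteration appears intended to guarantee one final poll of the ready bit after the full delay has elapsed, though the patch itself does not say so.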
@@ -3575,7 +3620,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
     return E1000_SUCCESS;
 }
 
-int32_t
+static int32_t
 e1000_read_kmrn_reg(struct e1000_hw *hw,
                     uint32_t reg_addr,
                     uint16_t *data)
@@ -3608,7 +3653,7 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
     return E1000_SUCCESS;
 }
 
-int32_t
+static int32_t
 e1000_write_kmrn_reg(struct e1000_hw *hw,
                      uint32_t reg_addr,
                      uint16_t data)
@@ -3658,7 +3703,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
 
     DEBUGOUT("Resetting Phy...\n");
 
-    if(hw->mac_type > e1000_82543) {
+    if (hw->mac_type > e1000_82543) {
         if ((hw->mac_type == e1000_80003es2lan) &&
             (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
             swfw = E1000_SWFW_PHY1_SM;
@@ -3706,7 +3751,7 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
     }
     udelay(150);
 
-    if((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
         /* Configure activity LED after PHY reset */
         led_ctrl = E1000_READ_REG(hw, LEDCTL);
         led_ctrl &= IGP_ACTIVITY_LED_MASK;
@@ -3716,14 +3761,13 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
 
     /* Wait for FW to finish PHY configuration. */
     ret_val = e1000_get_phy_cfg_done(hw);
+    if (ret_val != E1000_SUCCESS)
+        return ret_val;
     e1000_release_software_semaphore(hw);
 
-    if ((hw->mac_type == e1000_ich8lan) &&
-        (hw->phy_type == e1000_phy_igp_3)) {
-        ret_val = e1000_init_lcd_from_nvm(hw);
-        if (ret_val)
-            return ret_val;
-    }
+    if ((hw->mac_type == e1000_ich8lan) && (hw->phy_type == e1000_phy_igp_3))
+        ret_val = e1000_init_lcd_from_nvm(hw);
+
     return ret_val;
 }
 
@@ -3754,25 +3798,25 @@ e1000_phy_reset(struct e1000_hw *hw)
     case e1000_82572:
     case e1000_ich8lan:
         ret_val = e1000_phy_hw_reset(hw);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         break;
     default:
         ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data |= MII_CR_RESET;
         ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         udelay(1);
         break;
     }
 
-    if(hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+    if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
         e1000_phy_init_script(hw);
 
     return E1000_SUCCESS;
@@ -3839,7 +3883,7 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
 *
 * hw - struct containing variables accessed by shared code
 ******************************************************************************/
-int32_t
+static int32_t
 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
 {
     int32_t ret_val;
@@ -3850,8 +3894,8 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
     if (hw->kmrn_lock_loss_workaround_disabled)
         return E1000_SUCCESS;
 
     /* Make sure link is up before proceeding. If not just return.
-     * Attempting this while link is negotiating fouls up link
+     * Attempting this while link is negotiating fouled up link
      * stability */
     ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
     ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
@@ -3928,34 +3972,34 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
     hw->phy_id = (uint32_t) (phy_id_high << 16);
     udelay(20);
     ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK);
     hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82543:
-        if(hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
         break;
     case e1000_82544:
-        if(hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
         break;
     case e1000_82540:
    case e1000_82545:
    case e1000_82545_rev_3:
    case e1000_82546:
    case e1000_82546_rev_3:
-        if(hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
        break;
    case e1000_82541:
    case e1000_82541_rev_2:
    case e1000_82547:
    case e1000_82547_rev_2:
-        if(hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
        break;
    case e1000_82573:
-        if(hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
+        if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
        break;
    case e1000_80003es2lan:
        if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
@@ -3994,14 +4038,14 @@ e1000_phy_reset_dsp(struct e1000_hw *hw)
     do {
         if (hw->phy_type != e1000_phy_gg82563) {
             ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
-            if(ret_val) break;
+            if (ret_val) break;
         }
         ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
-        if(ret_val) break;
+        if (ret_val) break;
         ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
-        if(ret_val) break;
+        if (ret_val) break;
         ret_val = E1000_SUCCESS;
-    } while(0);
+    } while (0);
 
     return ret_val;
 }
@@ -4033,23 +4077,23 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
 
     /* Check polarity status */
     ret_val = e1000_check_polarity(hw, &polarity);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->cable_polarity = polarity;
 
     ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->mdix_mode = (phy_data & IGP01E1000_PSSR_MDIX) >>
                           IGP01E1000_PSSR_MDIX_SHIFT;
 
-    if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+    if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
        IGP01E1000_PSSR_SPEED_1000MBPS) {
         /* Local/Remote Receiver Information are only valid at 1000 Mbps */
         ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
@@ -4059,19 +4103,19 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
 
         /* Get cable length */
         ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* Translate to old method */
         average = (max_length + min_length) / 2;
 
-        if(average <= e1000_igp_cable_length_50)
+        if (average <= e1000_igp_cable_length_50)
             phy_info->cable_length = e1000_cable_length_50;
-        else if(average <= e1000_igp_cable_length_80)
+        else if (average <= e1000_igp_cable_length_80)
             phy_info->cable_length = e1000_cable_length_50_80;
-        else if(average <= e1000_igp_cable_length_110)
+        else if (average <= e1000_igp_cable_length_110)
             phy_info->cable_length = e1000_cable_length_80_110;
-        else if(average <= e1000_igp_cable_length_140)
+        else if (average <= e1000_igp_cable_length_140)
             phy_info->cable_length = e1000_cable_length_110_140;
         else
             phy_info->cable_length = e1000_cable_length_140;
@@ -4086,7 +4130,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
 * hw - Struct containing variables accessed by shared code
 * phy_info - PHY information structure
 ******************************************************************************/
-int32_t
+static int32_t
 e1000_phy_ife_get_info(struct e1000_hw *hw,
                        struct e1000_phy_info *phy_info)
 {
@@ -4147,7 +4191,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
     phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->extended_10bt_distance =
@@ -4159,12 +4203,12 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
 
     /* Check polarity status */
     ret_val = e1000_check_polarity(hw, &polarity);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
     phy_info->cable_polarity = polarity;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >>
@@ -4187,7 +4231,7 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
     }
 
     ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
@@ -4224,20 +4268,20 @@ e1000_phy_get_info(struct e1000_hw *hw,
     phy_info->local_rx = e1000_1000t_rx_status_undefined;
     phy_info->remote_rx = e1000_1000t_rx_status_undefined;
 
-    if(hw->media_type != e1000_media_type_copper) {
+    if (hw->media_type != e1000_media_type_copper) {
         DEBUGOUT("PHY info is only valid for copper media\n");
         return -E1000_ERR_CONFIG;
     }
 
     ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
-    if((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+    if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
         DEBUGOUT("PHY info is only valid if link is up\n");
         return -E1000_ERR_CONFIG;
     }
@@ -4257,7 +4301,7 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_validate_mdi_settings");
 
-    if(!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+    if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
         DEBUGOUT("Invalid MDI setting detected\n");
         hw->mdix = 1;
         return -E1000_ERR_CONFIG;
@@ -4304,7 +4348,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
         eeprom->type = e1000_eeprom_microwire;
         eeprom->opcode_bits = 3;
         eeprom->delay_usec = 50;
-        if(eecd & E1000_EECD_SIZE) {
+        if (eecd & E1000_EECD_SIZE) {
             eeprom->word_size = 256;
             eeprom->address_bits = 8;
         } else {
@@ -4372,7 +4416,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
         }
         eeprom->use_eerd = TRUE;
         eeprom->use_eewr = TRUE;
-        if(e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+        if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
             eeprom->type = e1000_eeprom_flash;
             eeprom->word_size = 2048;
 
@@ -4433,17 +4477,17 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
     /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
      * 32KB (incremented by powers of 2).
      */
-    if(hw->mac_type <= e1000_82547_rev_2) {
+    if (hw->mac_type <= e1000_82547_rev_2) {
         /* Set to default value for initial eeprom read. */
         eeprom->word_size = 64;
         ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
         /* 256B eeprom size was not supported in earlier hardware, so we
          * bump eeprom_size up one to ensure that "1" (which maps to 256B)
          * is never the result used in the shifting logic below. */
-        if(eeprom_size)
+        if (eeprom_size)
             eeprom_size++;
     } else {
         eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >>
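As the comment above states, the size field is a code n in [0..8] selecting 128 x 2^n bytes (128 B up to 32 KB). A tiny illustrative table generator; deriving the word count assumes 16-bit EEPROM words, which is how the rest of this file treats the device:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (unsigned code = 0; code <= 8; code++) {
            uint32_t bytes = 128u << code;          /* 128 B ... 32 KB */
            printf("size code %u -> %5" PRIu32 " bytes (%5" PRIu32 " words)\n",
                   code, bytes, bytes / 2);
        }
        return 0;
    }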
@@ -4528,7 +4572,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
      */
     eecd &= ~E1000_EECD_DI;
 
-    if(data & mask)
+    if (data & mask)
         eecd |= E1000_EECD_DI;
 
     E1000_WRITE_REG(hw, EECD, eecd);
@@ -4541,7 +4585,7 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
 
         mask = mask >> 1;
 
-    } while(mask);
+    } while (mask);
 
     /* We leave the "DI" bit set to "0" when we leave this routine. */
     eecd &= ~E1000_EECD_DI;
@@ -4573,14 +4617,14 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
     eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
     data = 0;
 
-    for(i = 0; i < count; i++) {
+    for (i = 0; i < count; i++) {
         data = data << 1;
         e1000_raise_ee_clk(hw, &eecd);
 
         eecd = E1000_READ_REG(hw, EECD);
 
         eecd &= ~(E1000_EECD_DI);
-        if(eecd & E1000_EECD_DO)
+        if (eecd & E1000_EECD_DO)
             data |= 1;
 
         e1000_lower_ee_clk(hw, &eecd);
@@ -4611,17 +4655,17 @@ e1000_acquire_eeprom(struct e1000_hw *hw)
 
     if (hw->mac_type != e1000_82573) {
         /* Request EEPROM Access */
-        if(hw->mac_type > e1000_82544) {
+        if (hw->mac_type > e1000_82544) {
             eecd |= E1000_EECD_REQ;
             E1000_WRITE_REG(hw, EECD, eecd);
             eecd = E1000_READ_REG(hw, EECD);
-            while((!(eecd & E1000_EECD_GNT)) &&
+            while ((!(eecd & E1000_EECD_GNT)) &&
                   (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
                 i++;
                 udelay(5);
                 eecd = E1000_READ_REG(hw, EECD);
             }
-            if(!(eecd & E1000_EECD_GNT)) {
+            if (!(eecd & E1000_EECD_GNT)) {
                 eecd &= ~E1000_EECD_REQ;
                 E1000_WRITE_REG(hw, EECD, eecd);
                 DEBUGOUT("Could not acquire EEPROM grant\n");
@@ -4664,7 +4708,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
 
     eecd = E1000_READ_REG(hw, EECD);
 
-    if(eeprom->type == e1000_eeprom_microwire) {
+    if (eeprom->type == e1000_eeprom_microwire) {
         eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
         E1000_WRITE_REG(hw, EECD, eecd);
         E1000_WRITE_FLUSH(hw);
@@ -4687,7 +4731,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
         E1000_WRITE_REG(hw, EECD, eecd);
         E1000_WRITE_FLUSH(hw);
         udelay(eeprom->delay_usec);
-    } else if(eeprom->type == e1000_eeprom_spi) {
+    } else if (eeprom->type == e1000_eeprom_spi) {
         /* Toggle CS to flush commands */
         eecd |= E1000_EECD_CS;
         E1000_WRITE_REG(hw, EECD, eecd);
@@ -4721,7 +4765,7 @@ e1000_release_eeprom(struct e1000_hw *hw)
         E1000_WRITE_REG(hw, EECD, eecd);
 
         udelay(hw->eeprom.delay_usec);
-    } else if(hw->eeprom.type == e1000_eeprom_microwire) {
+    } else if (hw->eeprom.type == e1000_eeprom_microwire) {
         /* cleanup eeprom */
 
         /* CS on Microwire is active-high */
@@ -4743,7 +4787,7 @@ e1000_release_eeprom(struct e1000_hw *hw)
     }
 
     /* Stop requesting EEPROM access */
-    if(hw->mac_type > e1000_82544) {
+    if (hw->mac_type > e1000_82544) {
         eecd &= ~E1000_EECD_REQ;
         E1000_WRITE_REG(hw, EECD, eecd);
     }
@@ -4781,12 +4825,12 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
         retry_count += 5;
 
         e1000_standby_eeprom(hw);
-    } while(retry_count < EEPROM_MAX_RETRY_SPI);
+    } while (retry_count < EEPROM_MAX_RETRY_SPI);
 
     /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
      * only 0-5mSec on 5V devices)
      */
-    if(retry_count >= EEPROM_MAX_RETRY_SPI) {
+    if (retry_count >= EEPROM_MAX_RETRY_SPI) {
         DEBUGOUT("SPI EEPROM Status error\n");
         return -E1000_ERR_EEPROM;
     }
@@ -4817,7 +4861,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
     /* A check for invalid values:  offset too large, too many words, and not
      * enough words.
      */
-    if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
        (words == 0)) {
         DEBUGOUT("\"words\" parameter out of bounds\n");
         return -E1000_ERR_EEPROM;
@@ -4825,7 +4869,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
 
     /* FLASH reads without acquiring the semaphore are safe */
     if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
-        hw->eeprom.use_eerd == FALSE) {
+       hw->eeprom.use_eerd == FALSE) {
         switch (hw->mac_type) {
         case e1000_80003es2lan:
             break;
@@ -4852,7 +4896,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
         uint16_t word_in;
         uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
 
-        if(e1000_spi_eeprom_ready(hw)) {
+        if (e1000_spi_eeprom_ready(hw)) {
             e1000_release_eeprom(hw);
             return -E1000_ERR_EEPROM;
         }
@@ -4860,7 +4904,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
         e1000_standby_eeprom(hw);
 
         /* Some SPI eeproms use the 8th address bit embedded in the opcode */
-        if((eeprom->address_bits == 8) && (offset >= 128))
+        if ((eeprom->address_bits == 8) && (offset >= 128))
             read_opcode |= EEPROM_A8_OPCODE_SPI;
 
         /* Send the READ command (opcode + addr)  */
@@ -4876,7 +4920,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
             word_in = e1000_shift_in_ee_bits(hw, 16);
             data[i] = (word_in >> 8) | (word_in << 8);
         }
-    } else if(eeprom->type == e1000_eeprom_microwire) {
+    } else if (eeprom->type == e1000_eeprom_microwire) {
         for (i = 0; i < words; i++) {
             /* Send the READ command (opcode + addr) */
             e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
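The data[i] assignment in the SPI branch above is a plain 16-bit byte swap: the EEPROM returns the high byte first, so the two bytes are exchanged into host order. The same transform in isolation:

    #include <assert.h>
    #include <stdint.h>

    static uint16_t swap16(uint16_t w)
    {
        return (uint16_t)((w >> 8) | (w << 8));  /* exchange high and low bytes */
    }

    int main(void)
    {
        assert(swap16(0x1234) == 0x3412);  /* same as (word_in >> 8) | (word_in << 8) */
        return 0;
    }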
@@ -4921,7 +4965,7 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
         E1000_WRITE_REG(hw, EERD, eerd);
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
 
-        if(error) {
+        if (error) {
             break;
         }
         data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
@@ -4958,7 +5002,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
                 E1000_EEPROM_RW_REG_START;
 
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
-        if(error) {
+        if (error) {
             break;
         }
 
@@ -4966,7 +5010,7 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
 
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
 
-        if(error) {
+        if (error) {
             break;
         }
     }
@@ -4987,13 +5031,13 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
     uint32_t i, reg = 0;
     int32_t done = E1000_ERR_EEPROM;
 
-    for(i = 0; i < attempts; i++) {
-        if(eerd == E1000_EEPROM_POLL_READ)
+    for (i = 0; i < attempts; i++) {
+        if (eerd == E1000_EEPROM_POLL_READ)
             reg = E1000_READ_REG(hw, EERD);
         else
             reg = E1000_READ_REG(hw, EEWR);
 
-        if(reg & E1000_EEPROM_RW_REG_DONE) {
+        if (reg & E1000_EEPROM_RW_REG_DONE) {
             done = E1000_SUCCESS;
             break;
         }
@@ -5025,7 +5069,7 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
         eecd = ((eecd >> 15) & 0x03);
 
         /* If both bits are set, device is Flash type */
-        if(eecd == 0x03) {
+        if (eecd == 0x03) {
             return FALSE;
         }
     }
@@ -5090,7 +5134,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
         checksum += eeprom_data;
     }
 
-    if(checksum == (uint16_t) EEPROM_SUM)
+    if (checksum == (uint16_t) EEPROM_SUM)
         return E1000_SUCCESS;
     else {
         DEBUGOUT("EEPROM Checksum Invalid\n");
@@ -5115,15 +5159,15 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_update_eeprom_checksum");
 
-    for(i = 0; i < EEPROM_CHECKSUM_REG; i++) {
-        if(e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
             DEBUGOUT("EEPROM Read Error\n");
             return -E1000_ERR_EEPROM;
         }
         checksum += eeprom_data;
     }
     checksum = (uint16_t) EEPROM_SUM - checksum;
-    if(e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+    if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
         DEBUGOUT("EEPROM Write Error\n");
         return -E1000_ERR_EEPROM;
     } else if (hw->eeprom.type == e1000_eeprom_flash) {
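The two checksum routines above implement one invariant: the EEPROM words from offset 0 up to and including the checksum word must sum, modulo 2^16, to EEPROM_SUM, so the checksum word is simply the difference. A sketch of the arithmetic; the constant values are my reading of e1000_hw.h and should be checked against the tree rather than taken as authoritative:

    #include <stdint.h>

    #define EEPROM_SUM          0xBABA   /* assumed expected total */
    #define EEPROM_CHECKSUM_REG 0x3F     /* assumed index of the checksum word */

    /* Return the checksum word that makes the first 64 words sum to EEPROM_SUM. */
    static uint16_t eeprom_checksum(const uint16_t words[EEPROM_CHECKSUM_REG])
    {
        uint16_t sum = 0;

        for (int i = 0; i < EEPROM_CHECKSUM_REG; i++)
            sum += words[i];             /* 16-bit wraparound is intentional */

        return (uint16_t)(EEPROM_SUM - sum);
    }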
@@ -5165,14 +5209,14 @@ e1000_write_eeprom(struct e1000_hw *hw,
     /* A check for invalid values:  offset too large, too many words, and not
      * enough words.
      */
-    if((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
       (words == 0)) {
         DEBUGOUT("\"words\" parameter out of bounds\n");
         return -E1000_ERR_EEPROM;
     }
 
     /* 82573 writes only through eewr */
-    if(eeprom->use_eewr == TRUE)
+    if (eeprom->use_eewr == TRUE)
         return e1000_write_eeprom_eewr(hw, offset, words, data);
 
     if (eeprom->type == e1000_eeprom_ich8)
@@ -5182,7 +5226,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
     if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
         return -E1000_ERR_EEPROM;
 
-    if(eeprom->type == e1000_eeprom_microwire) {
+    if (eeprom->type == e1000_eeprom_microwire) {
         status = e1000_write_eeprom_microwire(hw, offset, words, data);
     } else {
         status = e1000_write_eeprom_spi(hw, offset, words, data);
@@ -5218,7 +5262,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
     while (widx < words) {
         uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI;
 
-        if(e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+        if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
 
         e1000_standby_eeprom(hw);
 
@@ -5229,7 +5273,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
         e1000_standby_eeprom(hw);
 
         /* Some SPI eeproms use the 8th address bit embedded in the opcode */
-        if((eeprom->address_bits == 8) && (offset >= 128))
+        if ((eeprom->address_bits == 8) && (offset >= 128))
             write_opcode |= EEPROM_A8_OPCODE_SPI;
 
         /* Send the Write command (8-bit opcode + addr) */
@@ -5251,7 +5295,7 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
          * operation, while the smaller eeproms are capable of an 8-byte
          * PAGE WRITE operation.  Break the inner loop to pass new address
          */
-        if((((offset + widx)*2) % eeprom->page_size) == 0) {
+        if ((((offset + widx)*2) % eeprom->page_size) == 0) {
             e1000_standby_eeprom(hw);
             break;
         }
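The boundary test above works in byte addresses: (offset + widx) * 2 is the byte address of the next word to write, and a PAGE WRITE must end whenever that address lands on a multiple of the device's page size. Reduced to a predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the next word to write starts a new EEPROM page. */
    static bool spi_page_boundary(uint16_t offset, uint16_t widx, uint16_t page_size)
    {
        return (((offset + widx) * 2) % page_size) == 0;
        /* e.g. offset 14, widx 2, page_size 32: byte address 32 -> new page */
    }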
@@ -5317,12 +5361,12 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
      * signal that the command has been completed by raising the DO signal.
      * If DO does not go high in 10 milliseconds, then error out.
      */
-    for(i = 0; i < 200; i++) {
+    for (i = 0; i < 200; i++) {
         eecd = E1000_READ_REG(hw, EECD);
-        if(eecd & E1000_EECD_DO) break;
+        if (eecd & E1000_EECD_DO) break;
         udelay(50);
     }
-    if(i == 200) {
+    if (i == 200) {
         DEBUGOUT("EEPROM Write did not complete\n");
         return -E1000_ERR_EEPROM;
     }
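The numbers here agree with the comment: 200 polls separated by udelay(50) give exactly 200 x 50 us = 10 ms before the Microwire write is declared failed.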
@@ -5513,40 +5557,6 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
 }
 
 /******************************************************************************
- * Reads the adapter's part number from the EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- * part_num - Adapter's part number
- *****************************************************************************/
-int32_t
-e1000_read_part_num(struct e1000_hw *hw,
-                    uint32_t *part_num)
-{
-    uint16_t offset = EEPROM_PBA_BYTE_1;
-    uint16_t eeprom_data;
-
-    DEBUGFUNC("e1000_read_part_num");
-
-    /* Get word 0 from EEPROM */
-    if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
-        DEBUGOUT("EEPROM Read Error\n");
-        return -E1000_ERR_EEPROM;
-    }
-    /* Save word 0 in upper half of part_num */
-    *part_num = (uint32_t) (eeprom_data << 16);
-
-    /* Get word 1 from EEPROM */
-    if(e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) {
-        DEBUGOUT("EEPROM Read Error\n");
-        return -E1000_ERR_EEPROM;
-    }
-    /* Save word 1 in lower half of part_num */
-    *part_num |= eeprom_data;
-
-    return E1000_SUCCESS;
-}
-
-/******************************************************************************
  * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
  * second function of dual function devices
  *
@@ -5560,9 +5570,9 @@ e1000_read_mac_addr(struct e1000_hw * hw)
 
     DEBUGFUNC("e1000_read_mac_addr");
 
-    for(i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+    for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
         offset = i >> 1;
-        if(e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+        if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
             DEBUGOUT("EEPROM Read Error\n");
             return -E1000_ERR_EEPROM;
         }
@@ -5577,12 +5587,12 @@ e1000_read_mac_addr(struct e1000_hw * hw)
     case e1000_82546_rev_3:
     case e1000_82571:
     case e1000_80003es2lan:
-        if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+        if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
             hw->perm_mac_addr[5] ^= 0x01;
         break;
     }
 
-    for(i = 0; i < NODE_ADDRESS_SIZE; i++)
+    for (i = 0; i < NODE_ADDRESS_SIZE; i++)
         hw->mac_addr[i] = hw->perm_mac_addr[i];
     return E1000_SUCCESS;
 }
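The STATUS_FUNC_1 branch above is how dual-port parts derive the second port's address: one MAC is stored in the EEPROM, and LAN B flips the least significant bit of the last octet. In isolation:

    #include <stdint.h>

    #define NODE_ADDRESS_SIZE 6

    /* Derive the LAN B address from the EEPROM (LAN A) address. */
    static void derive_port_b_mac(const uint8_t a[NODE_ADDRESS_SIZE],
                                  uint8_t b[NODE_ADDRESS_SIZE])
    {
        for (int i = 0; i < NODE_ADDRESS_SIZE; i++)
            b[i] = a[i];
        b[5] ^= 0x01;   /* e.g. ...:a0 on LAN A becomes ...:a1 on LAN B */
    }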
@@ -5621,7 +5631,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
 
     /* Zero out the other 15 receive addresses. */
     DEBUGOUT("Clearing RAR[1-15]\n");
-    for(i = 1; i < rar_num; i++) {
+    for (i = 1; i < rar_num; i++) {
         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
         E1000_WRITE_FLUSH(hw);
         E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -5643,6 +5653,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
 * for the first 15 multicast addresses, and hashes the rest into the
 * multicast table.
 *****************************************************************************/
+#if 0
 void
 e1000_mc_addr_list_update(struct e1000_hw *hw,
                           uint8_t *mc_addr_list,
@@ -5671,7 +5682,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
     if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
         num_rar_entry -= 1;
 
-    for(i = rar_used_count; i < num_rar_entry; i++) {
+    for (i = rar_used_count; i < num_rar_entry; i++) {
         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
         E1000_WRITE_FLUSH(hw);
         E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
@@ -5683,13 +5694,13 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
     num_mta_entry = E1000_NUM_MTA_REGISTERS;
     if (hw->mac_type == e1000_ich8lan)
         num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN;
-    for(i = 0; i < num_mta_entry; i++) {
+    for (i = 0; i < num_mta_entry; i++) {
         E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
         E1000_WRITE_FLUSH(hw);
     }
 
     /* Add the new addresses */
-    for(i = 0; i < mc_addr_count; i++) {
+    for (i = 0; i < mc_addr_count; i++) {
         DEBUGOUT(" Adding the multicast addresses:\n");
         DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
                   mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad)],
@@ -5719,6 +5730,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
     }
     DEBUGOUT("MC Update Complete\n");
 }
+#endif /* 0 */
 
 /******************************************************************************
 * Hashes an address to determine its location in the multicast table
@@ -5820,7 +5832,7 @@ e1000_mta_set(struct e1000_hw *hw,
      * in the MTA, save off the previous entry before writing and
      * restore the old value after writing.
      */
-    if((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+    if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
         temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
         E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
         E1000_WRITE_FLUSH(hw);
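For orientation, e1000_mta_set() treats the multicast table as an array of 32-bit registers: the upper bits of the hash vector select hash_reg and the low five bits select the bit within it, which is why (hash_reg & 0x1) identifies the odd registers hit by the 82544 errata above. A sketch, with the register count an assumption rather than a quote from the header:

    #include <stdint.h>

    #define NUM_MTA_REGISTERS 128   /* assumed table size */

    static void mta_set(uint32_t mta[NUM_MTA_REGISTERS], uint16_t hash_value)
    {
        uint32_t hash_reg = (hash_value >> 5) & (NUM_MTA_REGISTERS - 1);
        uint32_t hash_bit = hash_value & 0x1F;

        mta[hash_reg] |= (1u << hash_bit);   /* set one bit in one register */
    }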
@@ -5970,7 +5982,7 @@ e1000_id_led_init(struct e1000_hw * hw)
 
     DEBUGFUNC("e1000_id_led_init");
 
-    if(hw->mac_type < e1000_82540) {
+    if (hw->mac_type < e1000_82540) {
         /* Nothing to do */
         return E1000_SUCCESS;
     }
@@ -5980,7 +5992,7 @@ e1000_id_led_init(struct e1000_hw * hw)
     hw->ledctl_mode1 = hw->ledctl_default;
     hw->ledctl_mode2 = hw->ledctl_default;
 
-    if(e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+    if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
         DEBUGOUT("EEPROM Read Error\n");
         return -E1000_ERR_EEPROM;
     }
@@ -5997,7 +6009,7 @@ e1000_id_led_init(struct e1000_hw * hw)
     }
     for (i = 0; i < 4; i++) {
         temp = (eeprom_data >> (i << 2)) & led_mask;
-        switch(temp) {
+        switch (temp) {
         case ID_LED_ON1_DEF2:
         case ID_LED_ON1_ON2:
         case ID_LED_ON1_OFF2:
@@ -6014,7 +6026,7 @@ e1000_id_led_init(struct e1000_hw * hw)
             /* Do nothing */
             break;
         }
-        switch(temp) {
+        switch (temp) {
         case ID_LED_DEF1_ON2:
         case ID_LED_ON1_ON2:
         case ID_LED_OFF1_ON2:
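The (i << 2) shift in the loop above steps through the ID LED word one nibble at a time: each LED gets a 4-bit behavior code, with LED 0 in bits 3:0. Extracted as a helper:

    #include <stdint.h>

    /* Unpack the four 4-bit LED behavior codes from the ID LED EEPROM word. */
    static void decode_id_led_word(uint16_t eeprom_data, uint8_t codes[4])
    {
        for (int i = 0; i < 4; i++)
            codes[i] = (eeprom_data >> (i << 2)) & 0x0F;
    }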
@@ -6048,7 +6060,7 @@ e1000_setup_led(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_setup_led");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -6062,16 +6074,16 @@ e1000_setup_led(struct e1000_hw *hw)
         /* Turn off PHY Smart Power Down (if enabled) */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
                                      &hw->phy_spd_default);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
                                       (uint16_t)(hw->phy_spd_default &
                                       ~IGP01E1000_GMII_SPD));
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         /* Fall Through */
     default:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             ledctl = E1000_READ_REG(hw, LEDCTL);
             /* Save current LEDCTL settings */
             hw->ledctl_default = ledctl;
@@ -6082,7 +6094,7 @@ e1000_setup_led(struct e1000_hw *hw)
             ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
                        E1000_LEDCTL_LED0_MODE_SHIFT);
             E1000_WRITE_REG(hw, LEDCTL, ledctl);
-        } else if(hw->media_type == e1000_media_type_copper)
+        } else if (hw->media_type == e1000_media_type_copper)
             E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
         break;
     }
@@ -6090,6 +6102,7 @@ e1000_setup_led(struct e1000_hw *hw)
     return E1000_SUCCESS;
 }
 
+
 /******************************************************************************
 * Used on 82571 and later Si that has LED blink bits.
 * Callers must use their own timer and should have already called
@@ -6140,7 +6153,7 @@ e1000_cleanup_led(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_cleanup_led");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -6154,7 +6167,7 @@ e1000_cleanup_led(struct e1000_hw *hw)
         /* Turn on PHY Smart Power Down (if previously enabled) */
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
                                       hw->phy_spd_default);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         /* Fall Through */
     default:
@@ -6182,7 +6195,7 @@ e1000_led_on(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_led_on");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -6191,7 +6204,7 @@ e1000_led_on(struct e1000_hw *hw)
         ctrl |= E1000_CTRL_SWDPIO0;
         break;
     case e1000_82544:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             /* Set SW Defineable Pin 0 to turn on the LED */
             ctrl |= E1000_CTRL_SWDPIN0;
             ctrl |= E1000_CTRL_SWDPIO0;
@@ -6202,7 +6215,7 @@ e1000_led_on(struct e1000_hw *hw)
         }
         break;
     default:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             /* Clear SW Defineable Pin 0 to turn on the LED */
             ctrl &= ~E1000_CTRL_SWDPIN0;
             ctrl |= E1000_CTRL_SWDPIO0;
@@ -6233,7 +6246,7 @@ e1000_led_off(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_led_off");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82542_rev2_0:
     case e1000_82542_rev2_1:
     case e1000_82543:
@@ -6242,7 +6255,7 @@ e1000_led_off(struct e1000_hw *hw)
         ctrl |= E1000_CTRL_SWDPIO0;
         break;
     case e1000_82544:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             /* Clear SW Defineable Pin 0 to turn off the LED */
             ctrl &= ~E1000_CTRL_SWDPIN0;
             ctrl |= E1000_CTRL_SWDPIO0;
@@ -6253,7 +6266,7 @@ e1000_led_off(struct e1000_hw *hw)
         }
         break;
     default:
-        if(hw->media_type == e1000_media_type_fiber) {
+        if (hw->media_type == e1000_media_type_fiber) {
             /* Set SW Defineable Pin 0 to turn off the LED */
             ctrl |= E1000_CTRL_SWDPIN0;
             ctrl |= E1000_CTRL_SWDPIO0;
@@ -6277,7 +6290,7 @@ e1000_led_off(struct e1000_hw *hw)
  *
  * hw - Struct containing variables accessed by shared code
  *****************************************************************************/
-static void
+void
 e1000_clear_hw_cntrs(struct e1000_hw *hw)
 {
     volatile uint32_t temp;
@@ -6340,7 +6353,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
     temp = E1000_READ_REG(hw, MPTC);
     temp = E1000_READ_REG(hw, BPTC);
 
-    if(hw->mac_type < e1000_82543) return;
+    if (hw->mac_type < e1000_82543) return;
 
     temp = E1000_READ_REG(hw, ALGNERRC);
     temp = E1000_READ_REG(hw, RXERRC);
@@ -6349,13 +6362,13 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
     temp = E1000_READ_REG(hw, TSCTC);
     temp = E1000_READ_REG(hw, TSCTFC);
 
-    if(hw->mac_type <= e1000_82544) return;
+    if (hw->mac_type <= e1000_82544) return;
 
     temp = E1000_READ_REG(hw, MGTPRC);
     temp = E1000_READ_REG(hw, MGTPDC);
     temp = E1000_READ_REG(hw, MGTPTC);
 
-    if(hw->mac_type <= e1000_82547_rev_2) return;
+    if (hw->mac_type <= e1000_82547_rev_2) return;
 
     temp = E1000_READ_REG(hw, IAC);
     temp = E1000_READ_REG(hw, ICRXOC);
@@ -6386,8 +6399,8 @@ e1000_reset_adaptive(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_reset_adaptive");
 
-    if(hw->adaptive_ifs) {
-        if(!hw->ifs_params_forced) {
+    if (hw->adaptive_ifs) {
+        if (!hw->ifs_params_forced) {
             hw->current_ifs_val = 0;
             hw->ifs_min_val = IFS_MIN;
             hw->ifs_max_val = IFS_MAX;
@@ -6414,12 +6427,12 @@ e1000_update_adaptive(struct e1000_hw *hw)
 {
     DEBUGFUNC("e1000_update_adaptive");
 
-    if(hw->adaptive_ifs) {
-        if((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
-            if(hw->tx_packet_delta > MIN_NUM_XMITS) {
+    if (hw->adaptive_ifs) {
+        if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
+            if (hw->tx_packet_delta > MIN_NUM_XMITS) {
                 hw->in_ifs_mode = TRUE;
-                if(hw->current_ifs_val < hw->ifs_max_val) {
-                    if(hw->current_ifs_val == 0)
+                if (hw->current_ifs_val < hw->ifs_max_val) {
+                    if (hw->current_ifs_val == 0)
                         hw->current_ifs_val = hw->ifs_min_val;
                     else
                         hw->current_ifs_val += hw->ifs_step_size;
@@ -6427,7 +6440,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
                 }
             }
         } else {
-            if(hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+            if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
                 hw->current_ifs_val = 0;
                 hw->in_ifs_mode = FALSE;
                 E1000_WRITE_REG(hw, AIT, 0);
@@ -6474,46 +6487,46 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
      * This could be simplified if all environments supported
      * 64-bit integers.
      */
-    if(carry_bit && ((stats->gorcl & 0x80000000) == 0))
+    if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
         stats->gorch++;
     /* Is this a broadcast or multicast? Check broadcast first,
      * since the test for a multicast frame will test positive on
      * a broadcast frame.
      */
-    if((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
+    if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
         /* Broadcast packet */
         stats->bprc++;
-    else if(*mac_addr & 0x01)
+    else if (*mac_addr & 0x01)
         /* Multicast packet */
         stats->mprc++;
 
-    if(frame_len == hw->max_frame_size) {
+    if (frame_len == hw->max_frame_size) {
         /* In this case, the hardware has overcounted the number of
          * oversize frames.
         */
-        if(stats->roc > 0)
+        if (stats->roc > 0)
             stats->roc--;
     }
 
     /* Adjust the bin counters when the extra byte put the frame in the
      * wrong bin. Remember that the frame_len was adjusted above.
      */
-    if(frame_len == 64) {
+    if (frame_len == 64) {
         stats->prc64++;
         stats->prc127--;
-    } else if(frame_len == 127) {
+    } else if (frame_len == 127) {
         stats->prc127++;
         stats->prc255--;
-    } else if(frame_len == 255) {
+    } else if (frame_len == 255) {
         stats->prc255++;
         stats->prc511--;
-    } else if(frame_len == 511) {
+    } else if (frame_len == 511) {
         stats->prc511++;
         stats->prc1023--;
-    } else if(frame_len == 1023) {
+    } else if (frame_len == 1023) {
         stats->prc1023++;
         stats->prc1522--;
-    } else if(frame_len == 1522) {
+    } else if (frame_len == 1522) {
         stats->prc1522++;
     }
 }
@@ -6553,10 +6566,10 @@ e1000_get_bus_info(struct e1000_hw *hw)
         hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
                        e1000_bus_type_pcix : e1000_bus_type_pci;
 
-        if(hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+        if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
             hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
                             e1000_bus_speed_66 : e1000_bus_speed_120;
-        } else if(hw->bus_type == e1000_bus_type_pci) {
+        } else if (hw->bus_type == e1000_bus_type_pci) {
             hw->bus_speed = (status & E1000_STATUS_PCI66) ?
                             e1000_bus_speed_66 : e1000_bus_speed_33;
         } else {
@@ -6587,6 +6600,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
  * hw - Struct containing variables accessed by shared code
  * offset - offset to read from
  *****************************************************************************/
+#if 0
 uint32_t
 e1000_read_reg_io(struct e1000_hw *hw,
                   uint32_t offset)
@@ -6597,6 +6611,7 @@ e1000_read_reg_io(struct e1000_hw *hw,
     e1000_io_write(hw, io_addr, offset);
     return e1000_io_read(hw, io_data);
 }
+#endif /* 0 */
 
 /******************************************************************************
  * Writes a value to one of the devices registers using port I/O (as opposed to
@@ -6649,11 +6664,11 @@ e1000_get_cable_length(struct e1000_hw *hw,
     *min_length = *max_length = 0;
 
     /* Use old method for Phy older than IGP */
-    if(hw->phy_type == e1000_phy_m88) {
+    if (hw->phy_type == e1000_phy_m88) {
 
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
@@ -6712,7 +6727,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
             return -E1000_ERR_PHY;
             break;
         }
-    } else if(hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+    } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
         uint16_t cur_agc_value;
         uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
         uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
@@ -6721,10 +6736,10 @@ e1000_get_cable_length(struct e1000_hw *hw,
                                                          IGP01E1000_PHY_AGC_C,
                                                          IGP01E1000_PHY_AGC_D};
         /* Read the AGC registers for all channels */
-        for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
 
             ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
@@ -6774,7 +6789,7 @@ e1000_get_cable_length(struct e1000_hw *hw,
         if (ret_val)
             return ret_val;
 
         /* Getting bits 15:9, which represent the combination of course and
          * fine gain values. The result is a number that can be put into
          * the lookup table to obtain the approximate cable length. */
         cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
@@ -6839,7 +6854,7 @@ e1000_check_polarity(struct e1000_hw *hw,
         /* return the Polarity bit in the Status register. */
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
         *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
                     M88E1000_PSSR_REV_POLARITY_SHIFT;
@@ -6849,18 +6864,18 @@ e1000_check_polarity(struct e1000_hw *hw,
         /* Read the Status register to check the speed */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
          * find the polarity status */
-        if((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+        if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
            IGP01E1000_PSSR_SPEED_1000MBPS) {
 
             /* Read the GIG initialization PCS register (0x00B4) */
             ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
                                          &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             /* Check the polarity bits */
@@ -6909,7 +6924,7 @@ e1000_check_downshift(struct e1000_hw *hw)
         hw->phy_type == e1000_phy_igp_2) {
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
@@ -6917,7 +6932,7 @@ e1000_check_downshift(struct e1000_hw *hw)
               (hw->phy_type == e1000_phy_gg82563)) {
         ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
                                      &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
@@ -6957,42 +6972,42 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
 
     DEBUGFUNC("e1000_config_dsp_after_link_change");
 
-    if(hw->phy_type != e1000_phy_igp)
+    if (hw->phy_type != e1000_phy_igp)
         return E1000_SUCCESS;
 
-    if(link_up) {
+    if (link_up) {
         ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
-        if(ret_val) {
+        if (ret_val) {
             DEBUGOUT("Error getting link speed and duplex\n");
             return ret_val;
         }
 
-        if(speed == SPEED_1000) {
+        if (speed == SPEED_1000) {
 
             ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
             if (ret_val)
                 return ret_val;
 
-            if((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+            if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
                min_length >= e1000_igp_cable_length_50) {
 
-                for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
                     ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
                                                  &phy_data);
-                    if(ret_val)
+                    if (ret_val)
                         return ret_val;
 
                     phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
 
                     ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
                                                   phy_data);
-                    if(ret_val)
+                    if (ret_val)
                         return ret_val;
                 }
                 hw->dsp_config_state = e1000_dsp_config_activated;
             }
 
-            if((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+            if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
                (min_length < e1000_igp_cable_length_50)) {
 
                 uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
@@ -7001,70 +7016,70 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
                 /* clear previous idle error counts */
                 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
                                              &phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
 
-                for(i = 0; i < ffe_idle_err_timeout; i++) {
+                for (i = 0; i < ffe_idle_err_timeout; i++) {
                     udelay(1000);
                     ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
                                                  &phy_data);
-                    if(ret_val)
+                    if (ret_val)
                         return ret_val;
 
                     idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
-                    if(idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+                    if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
                         hw->ffe_config_state = e1000_ffe_config_active;
 
                         ret_val = e1000_write_phy_reg(hw,
                                                       IGP01E1000_PHY_DSP_FFE,
                                                       IGP01E1000_PHY_DSP_FFE_CM_CP);
-                        if(ret_val)
+                        if (ret_val)
                             return ret_val;
                         break;
                     }
 
-                    if(idle_errs)
+                    if (idle_errs)
                         ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
                 }
             }
         }
     } else {
-        if(hw->dsp_config_state == e1000_dsp_config_activated) {
+        if (hw->dsp_config_state == e1000_dsp_config_activated) {
             /* Save off the current value of register 0x2F5B to be restored at
              * the end of the routines. */
             ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             /* Disable the PHY transmitter */
             ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             msec_delay_irq(20);
 
             ret_val = e1000_write_phy_reg(hw, 0x0000,
                                           IGP01E1000_IEEE_FORCE_GIGA);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
-            for(i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+            for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
                 ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
 
                 phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
                 phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
 
                 ret_val = e1000_write_phy_reg(hw,dsp_reg_array[i], phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
             }
 
             ret_val = e1000_write_phy_reg(hw, 0x0000,
                                           IGP01E1000_IEEE_RESTART_AUTONEG);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             msec_delay_irq(20);
@@ -7072,40 +7087,40 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
             /* Now enable the transmitter */
             ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             hw->dsp_config_state = e1000_dsp_config_enabled;
         }
 
-        if(hw->ffe_config_state == e1000_ffe_config_active) {
+        if (hw->ffe_config_state == e1000_ffe_config_active) {
             /* Save off the current value of register 0x2F5B to be restored at
              * the end of the routines. */
             ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             /* Disable the PHY transmitter */
             ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             msec_delay_irq(20);
 
             ret_val = e1000_write_phy_reg(hw, 0x0000,
                                           IGP01E1000_IEEE_FORCE_GIGA);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
                                           IGP01E1000_PHY_DSP_FFE_DEFAULT);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             ret_val = e1000_write_phy_reg(hw, 0x0000,
                                           IGP01E1000_IEEE_RESTART_AUTONEG);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             msec_delay_irq(20);
@@ -7113,7 +7128,7 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
             /* Now enable the transmitter */
             ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
 
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             hw->ffe_config_state = e1000_ffe_config_enabled;
@@ -7138,20 +7153,20 @@ e1000_set_phy_mode(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_set_phy_mode");
 
-    if((hw->mac_type == e1000_82545_rev_3) &&
+    if ((hw->mac_type == e1000_82545_rev_3) &&
        (hw->media_type == e1000_media_type_copper)) {
         ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
-        if(ret_val) {
+        if (ret_val) {
             return ret_val;
         }
 
-        if((eeprom_data != EEPROM_RESERVED_WORD) &&
+        if ((eeprom_data != EEPROM_RESERVED_WORD) &&
           (eeprom_data & EEPROM_PHY_CLASS_A)) {
             ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
             ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             hw->phy_reset_disable = FALSE;
@@ -7202,16 +7217,16 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
         phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
     } else {
         ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
-    if(!active) {
-        if(hw->mac_type == e1000_82541_rev_2 ||
+    if (!active) {
+        if (hw->mac_type == e1000_82541_rev_2 ||
            hw->mac_type == e1000_82547_rev_2) {
             phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         } else {
             if (hw->mac_type == e1000_ich8lan) {
@@ -7233,13 +7248,13 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
             if (hw->smart_speed == e1000_smart_speed_on) {
                 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                              &phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
 
                 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
                 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                               phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
             } else if (hw->smart_speed == e1000_smart_speed_off) {
                 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
@@ -7250,19 +7265,19 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
                 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
                 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                               phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
             }
 
-    } else if((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+    } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
              (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
 
-        if(hw->mac_type == e1000_82541_rev_2 ||
+        if (hw->mac_type == e1000_82541_rev_2 ||
            hw->mac_type == e1000_82547_rev_2) {
             phy_data |= IGP01E1000_GMII_FLEX_SPD;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
         } else {
             if (hw->mac_type == e1000_ich8lan) {
@@ -7279,12 +7294,12 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
 
             /* When LPLU is enabled we should disable SmartSpeed */
             ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
             phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
             ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
-            if(ret_val)
+            if (ret_val)
                 return ret_val;
 
         }
@@ -7314,14 +7329,14 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
     uint16_t phy_data;
     DEBUGFUNC("e1000_set_d0_lplu_state");
 
-    if(hw->mac_type <= e1000_82547_rev_2)
+    if (hw->mac_type <= e1000_82547_rev_2)
         return E1000_SUCCESS;
 
     if (hw->mac_type == e1000_ich8lan) {
         phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
     } else {
         ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
     }
 
@@ -7343,13 +7358,13 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
             if (hw->smart_speed == e1000_smart_speed_on) {
                 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                              &phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
 
                 phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
                 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                               phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
             } else if (hw->smart_speed == e1000_smart_speed_off) {
                 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
@@ -7360,7 +7375,7 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
                 phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
                 ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
                                               phy_data);
-                if(ret_val)
+                if (ret_val)
                     return ret_val;
             }
 
@@ -7379,12 +7394,12 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
 
         /* When LPLU is enabled we should disable SmartSpeed */
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
         phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
         ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
-        if(ret_val)
+        if (ret_val)
             return ret_val;
 
     }
@@ -7405,7 +7420,7 @@ e1000_set_vco_speed(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_set_vco_speed");
 
-    switch(hw->mac_type) {
+    switch (hw->mac_type) {
     case e1000_82545_rev_3:
     case e1000_82546_rev_3:
         break;
@@ -7416,39 +7431,39 @@ e1000_set_vco_speed(struct e1000_hw *hw)
     /* Set PHY register 30, page 5, bit 8 to 0 */
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
 
     phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
 
     /* Set PHY register 30, page 4, bit 11 to 1 */
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
-    if(ret_val)
+    if (ret_val)
       return ret_val;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
 
     phy_data |= M88E1000_PHY_VCO_REG_BIT11;
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
-    if(ret_val)
+    if (ret_val)
       return ret_val;
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
 
     return E1000_SUCCESS;
@@ -7527,7 +7542,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
 {
     uint8_t *tmp;
     uint8_t *bufptr = buffer;
-    uint32_t data;
+    uint32_t data = 0;
     uint16_t remaining, i, j, prev_bytes;
 
     /* sum = only sum of the data and it is not checksum */
@@ -7607,7 +7622,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
 
     buffer = (uint8_t *) hdr;
     i = length;
-    while(i--)
+    while (i--)
         sum += buffer[i];
 
     hdr->checksum = 0 - sum;
@@ -7630,8 +7645,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
  * returns - E1000_SUCCESS for success.
  ****************************************************************************/
 static int32_t
-e1000_mng_write_commit(
-                       struct e1000_hw * hw)
+e1000_mng_write_commit(struct e1000_hw * hw)
 {
     uint32_t hicr;
 
@@ -7803,31 +7817,31 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
     /* Disable the transmitter on the PHY */
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
-    if(ret_val)
+    if (ret_val)
         return ret_val;
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
 
     /* This loop will early-out if the NO link condition has been met. */
-    for(i = PHY_FORCE_TIME; i > 0; i--) {
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
         /* Read the MII Status Register and wait for Link Status bit
          * to be clear.
          */
 
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
            return ret_val;
 
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
            return ret_val;
 
-        if((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+        if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
         msec_delay_irq(100);
     }
 
@@ -7837,40 +7851,40 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
     /* Now we will re-enable th transmitter on the PHY */
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
     msec_delay_irq(50);
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
     msec_delay_irq(50);
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
-    if(ret_val)
+    if (ret_val)
        return ret_val;
     msec_delay_irq(50);
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
-    if(ret_val)
+    if (ret_val)
       return ret_val;
 
     ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
-    if(ret_val)
+    if (ret_val)
       return ret_val;
 
     /* This loop will early-out if the link condition has been met. */
-    for(i = PHY_FORCE_TIME; i > 0; i--) {
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
         /* Read the MII Status Register and wait for Link Status bit
          * to be set.
          */
 
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
            return ret_val;
 
         ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
-        if(ret_val)
+        if (ret_val)
           return ret_val;
 
-        if(mii_status_reg & MII_SR_LINK_STATUS) break;
+        if (mii_status_reg & MII_SR_LINK_STATUS) break;
         msec_delay_irq(100);
     }
     return E1000_SUCCESS;
@@ -7909,6 +7923,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
  * returns: - none.
  *
  ***************************************************************************/
+#if 0
 void
 e1000_enable_pciex_master(struct e1000_hw *hw)
 {
@@ -7923,6 +7938,7 @@ e1000_enable_pciex_master(struct e1000_hw *hw)
     ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
     E1000_WRITE_REG(hw, CTRL, ctrl);
 }
+#endif /* 0 */
 
 /*******************************************************************************
 *
@@ -7947,15 +7963,15 @@ e1000_disable_pciex_master(struct e1000_hw *hw)
 
     e1000_set_pci_express_master_disable(hw);
 
-    while(timeout) {
-        if(!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+    while (timeout) {
+        if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
             break;
         else
             udelay(100);
         timeout--;
     }
 
-    if(!timeout) {
+    if (!timeout) {
         DEBUGOUT("Master requests are pending.\n");
         return -E1000_ERR_MASTER_REQUESTS_PENDING;
     }
@@ -7996,7 +8012,7 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
         timeout--;
     }
 
-    if(!timeout) {
+    if (!timeout) {
         DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
         return -E1000_ERR_RESET;
     }
@@ -8077,7 +8093,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
 
-    if(!hw->eeprom_semaphore_present)
+    if (!hw->eeprom_semaphore_present)
         return E1000_SUCCESS;
 
     if (hw->mac_type == e1000_80003es2lan) {
@@ -8088,20 +8104,20 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
 
     /* Get the FW semaphore. */
     timeout = hw->eeprom.word_size + 1;
-    while(timeout) {
+    while (timeout) {
         swsm = E1000_READ_REG(hw, SWSM);
         swsm |= E1000_SWSM_SWESMBI;
         E1000_WRITE_REG(hw, SWSM, swsm);
         /* if we managed to set the bit we got the semaphore. */
         swsm = E1000_READ_REG(hw, SWSM);
-        if(swsm & E1000_SWSM_SWESMBI)
+        if (swsm & E1000_SWSM_SWESMBI)
             break;
 
         udelay(50);
         timeout--;
     }
 
-    if(!timeout) {
+    if (!timeout) {
         /* Release semaphores */
         e1000_put_hw_eeprom_semaphore(hw);
         DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
@@ -8126,7 +8142,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
 
     DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
 
-    if(!hw->eeprom_semaphore_present)
+    if (!hw->eeprom_semaphore_present)
         return;
 
     swsm = E1000_READ_REG(hw, SWSM);
@@ -8148,7 +8164,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
 * E1000_SUCCESS at any other case.
 *
 ***************************************************************************/
-int32_t
+static int32_t
 e1000_get_software_semaphore(struct e1000_hw *hw)
 {
     int32_t timeout = hw->eeprom.word_size + 1;
@@ -8159,16 +8175,16 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
     if (hw->mac_type != e1000_80003es2lan)
         return E1000_SUCCESS;
 
-    while(timeout) {
+    while (timeout) {
         swsm = E1000_READ_REG(hw, SWSM);
         /* If SMBI bit cleared, it is now set and we hold the semaphore */
-        if(!(swsm & E1000_SWSM_SMBI))
+        if (!(swsm & E1000_SWSM_SMBI))
             break;
         msec_delay_irq(1);
         timeout--;
     }
 
-    if(!timeout) {
+    if (!timeout) {
         DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
         return -E1000_ERR_RESET;
     }
@@ -8183,7 +8199,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
 * hw: Struct containing variables accessed by shared code
 *
 ***************************************************************************/
-void
+static void
 e1000_release_software_semaphore(struct e1000_hw *hw)
 {
     uint32_t swsm;
@@ -8244,7 +8260,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
     case e1000_82573:
     case e1000_80003es2lan:
         fwsm = E1000_READ_REG(hw, FWSM);
-        if((fwsm & E1000_FWSM_MODE_MASK) != 0)
+        if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
             return TRUE;
         break;
     case e1000_ich8lan:
@@ -8265,7 +8281,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
 * returns: E1000_SUCCESS
 *
 *****************************************************************************/
-int32_t
+static int32_t
 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
 {
     uint32_t gcr_reg = 0;
@@ -8306,7 +8322,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
 * hw: Struct containing variables accessed by shared code
 *
 ***************************************************************************/
-int32_t
+static int32_t
 e1000_get_software_flag(struct e1000_hw *hw)
 {
     int32_t timeout = PHY_CFG_TIMEOUT;
@@ -8345,7 +8361,7 @@ e1000_get_software_flag(struct e1000_hw *hw)
 * hw: Struct containing variables accessed by shared code
 *
 ***************************************************************************/
-void
+static void
 e1000_release_software_flag(struct e1000_hw *hw)
 {
     uint32_t extcnf_ctrl;
@@ -8369,6 +8385,7 @@ e1000_release_software_flag(struct e1000_hw *hw)
 * hw: Struct containing variables accessed by shared code
 *
 ***************************************************************************/
+#if 0
 int32_t
 e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
 {
@@ -8388,6 +8405,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
 
     return ret_val;
 }
+#endif /* 0 */
 
 /***************************************************************************
 *
@@ -8397,6 +8415,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
 * hw: Struct containing variables accessed by shared code
 *
 ***************************************************************************/
+#if 0
 int32_t
 e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
 {
@@ -8416,6 +8435,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
 
     return ret_val;
 }
+#endif /* 0 */
 
 /******************************************************************************
  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
@@ -8426,7 +8446,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
 * data - word read from the EEPROM
 * words - number of words to read
 *****************************************************************************/
-int32_t
+static int32_t
 e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
                        uint16_t *data)
 {
@@ -8482,7 +8502,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
 * words - number of words to write
 * data - words to write to the EEPROM
 *****************************************************************************/
-int32_t
+static int32_t
 e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
                         uint16_t *data)
 {
@@ -8529,7 +8549,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
 *
 * hw - The pointer to the hw structure
 ****************************************************************************/
-int32_t
+static int32_t
 e1000_ich8_cycle_init(struct e1000_hw *hw)
 {
     union ich8_hws_flash_status hsfsts;
@@ -8596,7 +8616,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
 *
 * hw - The pointer to the hw structure
 ****************************************************************************/
-int32_t
+static int32_t
 e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
 {
     union ich8_hws_flash_ctrl hsflctl;
@@ -8631,7 +8651,7 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *****************************************************************************/
-int32_t
+static int32_t
 e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
                      uint32_t size, uint16_t* data)
 {
@@ -8710,7 +8730,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
 * size - Size of data to read, 1=byte 2=word
 * data - The byte(s) to write to the NVM.
 *****************************************************************************/
-int32_t
+static int32_t
 e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
                       uint16_t data)
 {
@@ -8785,7 +8805,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
 * index - The index of the byte to read.
 * data - Pointer to a byte to store the value read.
 *****************************************************************************/
-int32_t
+static int32_t
 e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
 {
     int32_t status = E1000_SUCCESS;
@@ -8808,7 +8828,7 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
 * index - The index of the byte to write.
 * byte - The byte to write to the NVM.
 *****************************************************************************/
8811int32_t 8831static int32_t
8812e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) 8832e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8813{ 8833{
8814 int32_t error = E1000_SUCCESS; 8834 int32_t error = E1000_SUCCESS;
@@ -8839,7 +8859,7 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8839 * index - The index of the byte to read. 8859 * index - The index of the byte to read.
8840 * data - The byte to write to the NVM. 8860 * data - The byte to write to the NVM.
8841 *****************************************************************************/ 8861 *****************************************************************************/
8842int32_t 8862static int32_t
8843e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) 8863e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8844{ 8864{
8845 int32_t status = E1000_SUCCESS; 8865 int32_t status = E1000_SUCCESS;
@@ -8857,7 +8877,7 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8857 * index - The starting byte index of the word to read. 8877 * index - The starting byte index of the word to read.
8858 * data - Pointer to a word to store the value read. 8878 * data - Pointer to a word to store the value read.
8859 *****************************************************************************/ 8879 *****************************************************************************/
8860int32_t 8880static int32_t
8861e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) 8881e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8862{ 8882{
8863 int32_t status = E1000_SUCCESS; 8883 int32_t status = E1000_SUCCESS;
@@ -8872,6 +8892,7 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8872 * index - The starting byte index of the word to read. 8892 * index - The starting byte index of the word to read.
8873 * data - The word to write to the NVM. 8893 * data - The word to write to the NVM.
8874 *****************************************************************************/ 8894 *****************************************************************************/
8895#if 0
8875int32_t 8896int32_t
8876e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) 8897e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8877{ 8898{
@@ -8879,6 +8900,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8879 status = e1000_write_ich8_data(hw, index, 2, data); 8900 status = e1000_write_ich8_data(hw, index, 2, data);
8880 return status; 8901 return status;
8881} 8902}
8903#endif /* 0 */
8882 8904
8883/****************************************************************************** 8905/******************************************************************************
8884 * Erases the bank specified. Each bank is a 4k block. Segments are 0 based. 8906 * Erases the bank specified. Each bank is a 4k block. Segments are 0 based.
@@ -8887,7 +8909,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8887 * hw - pointer to e1000_hw structure 8909 * hw - pointer to e1000_hw structure
8888 * segment - 0 for first segment, 1 for second segment, etc. 8910 * segment - 0 for first segment, 1 for second segment, etc.
8889 *****************************************************************************/ 8911 *****************************************************************************/
8890int32_t 8912static int32_t
8891e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) 8913e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
8892{ 8914{
8893 union ich8_hws_flash_status hsfsts; 8915 union ich8_hws_flash_status hsfsts;
@@ -8984,6 +9006,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
8984 * hw: Struct containing variables accessed by shared code 9006 * hw: Struct containing variables accessed by shared code
8985 * 9007 *
8986 *****************************************************************************/ 9008 *****************************************************************************/
9009#if 0
8987int32_t 9010int32_t
8988e1000_duplex_reversal(struct e1000_hw *hw) 9011e1000_duplex_reversal(struct e1000_hw *hw)
8989{ 9012{
@@ -9012,8 +9035,9 @@ e1000_duplex_reversal(struct e1000_hw *hw)
9012 9035
9013 return ret_val; 9036 return ret_val;
9014} 9037}
9038#endif /* 0 */
9015 9039
9016int32_t 9040static int32_t
9017e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, 9041e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
9018 uint32_t cnf_base_addr, uint32_t cnf_size) 9042 uint32_t cnf_base_addr, uint32_t cnf_size)
9019{ 9043{
@@ -9047,7 +9071,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
9047} 9071}
9048 9072
9049 9073
9050int32_t 9074static int32_t
9051e1000_init_lcd_from_nvm(struct e1000_hw *hw) 9075e1000_init_lcd_from_nvm(struct e1000_hw *hw)
9052{ 9076{
9053 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; 9077 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
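The e1000_hw.c hunks above apply one pattern twice over: ICH8 flash helpers called only from within this file get static linkage, and helpers left with no callers at all are fenced off with #if 0 instead of being deleted outright. A minimal, self-contained C sketch of that pattern follows; the function names are illustrative, not taken from the driver.

#include <stdint.h>

/* File-local linkage: the extern declaration is dropped from the header,
 * so the symbol no longer leaks into the kernel's global namespace. */
static int32_t helper_used_only_here(int32_t value)
{
	return value;
}

/* No remaining callers: compiled out with #if 0 rather than deleted, so
 * the code stays greppable until it is removed for good. */
#if 0
int32_t helper_with_no_callers(int32_t value)
{
	return -value;
}
#endif /* 0 */

int32_t caller(void)
{
	return helper_used_only_here(1);
}

Making the helpers static also lets the compiler flag them the moment they become unused, which is what exposes #if 0 candidates like the ones above.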
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index f9341e3276b3..a170e96251f6 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -323,13 +323,8 @@ int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t dat
 int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
 int32_t e1000_phy_reset(struct e1000_hw *hw);
 void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
-int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
-int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
-int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
 int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
 int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
-int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
-int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
 
 /* EEPROM Functions */
 int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
@@ -341,9 +336,9 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
 #define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
 
 #define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
 #define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
 #define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
 #define E1000_MNG_IAMT_MODE 0x3
 #define E1000_MNG_ICH_IAMT_MODE 0x2
 #define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
 
@@ -390,7 +385,7 @@ struct e1000_host_mng_dhcp_cookie{
 #endif
 
 int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
 				  uint16_t length);
 boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
 boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
 
@@ -400,13 +395,8 @@ int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
 int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
 int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
 int32_t e1000_read_mac_addr(struct e1000_hw * hw);
-int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
-void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
-void e1000_release_software_flag(struct e1000_hw *hw);
-int32_t e1000_get_software_flag(struct e1000_hw *hw);
 
 /* Filters (multicast, vlan, receive) */
-void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
 uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
 void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
 void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -431,31 +421,9 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw);
 void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
 void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
 /* Port I/O is only supported on 82544 and newer */
-uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
-uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
 void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
-void e1000_enable_pciex_master(struct e1000_hw *hw);
 int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
-int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
-void e1000_release_software_semaphore(struct e1000_hw *hw);
 int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
-int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
-
-int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
-			     uint8_t *data);
-int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
-				     uint8_t byte);
-int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
-			      uint8_t byte);
-int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
-			     uint16_t *data);
-int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
-			     uint32_t size, uint16_t *data);
-int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
-			       uint16_t words, uint16_t *data);
-int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
-				uint16_t words, uint16_t *data);
-int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
 
 
 #define E1000_READ_REG_IO(a, reg) \
@@ -502,6 +470,7 @@ int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
 #define E1000_DEV_ID_82571EB_COPPER 0x105E
 #define E1000_DEV_ID_82571EB_FIBER 0x105F
 #define E1000_DEV_ID_82571EB_SERDES 0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
 #define E1000_DEV_ID_82572EI_COPPER 0x107D
 #define E1000_DEV_ID_82572EI_FIBER 0x107E
 #define E1000_DEV_ID_82572EI_SERDES 0x107F
@@ -555,7 +524,7 @@ int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
 
 
 /* 802.1q VLAN Packet Sizes */
 #define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
 
 /* Ethertype field values */
 #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
@@ -729,6 +698,7 @@ union e1000_rx_desc_packet_split {
 			  E1000_RXDEXT_STATERR_CXE | \
 			  E1000_RXDEXT_STATERR_RXE)
 
+
 /* Transmit Descriptor */
 struct e1000_tx_desc {
 	uint64_t buffer_addr; /* Address of the descriptor's data buffer */
@@ -2118,7 +2088,7 @@ struct e1000_hw {
 #define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address
                                                  * filtering */
 #define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */
 #define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */
 #define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */
 #define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */
 #define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */
@@ -2204,7 +2174,7 @@ struct e1000_host_command_info {
 
 #define E1000_MDALIGN 4096
 
-/* PCI-Ex registers */
+/* PCI-Ex registers*/
 
 /* PCI-Ex Control Register */
 #define E1000_GCR_RXD_NO_SNOOP 0x00000001
@@ -2256,7 +2226,7 @@ struct e1000_host_command_info {
 #define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erast/write disable */
 
 /* EEPROM Commands - SPI */
 #define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */
 #define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */
 #define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */
 #define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */
@@ -3114,10 +3084,10 @@ struct e1000_host_command_info {
 
 /* DSP Distance Register (Page 5, Register 26) */
 #define GG82563_DSPD_CABLE_LENGTH 0x0007 /* 0 = <50M;
                                             1 = 50-80M;
                                             2 = 80-110M;
                                             3 = 110-140M;
                                             4 = >140M */
 
 /* Kumeran Mode Control Register (Page 193, Register 16) */
 #define GG82563_KMCR_PHY_LEDS_EN 0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
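Supporting a new board is a two-part edit: the device ID define added above (0x10A4, the 82571EB quad-port copper) and a matching entry in the driver's PCI ID table, which the e1000_main.c diff below supplies. A sketch of how such a pair is typically wired up, using placeholder names rather than the e1000 macros:

#include <linux/module.h>
#include <linux/pci.h>

/* Both values are placeholders; the real ones come from the header. */
#define EXAMPLE_VENDOR_ID	0x8086
#define EXAMPLE_DEV_ID_NEW	0x10A4

static struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEV_ID_NEW) },
	{ 0, }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

The MODULE_DEVICE_TABLE() line is what lets hotplug tooling load the module automatically when a matching device appears.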
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index da62db897426..dece1838aaf5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
+#define DRV_VERSION "7.2.7-k2"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -48,7 +48,6 @@ static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
  * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
  */
 static struct pci_device_id e1000_pci_tbl[] = {
-	INTEL_E1000_ETHERNET_DEVICE(0x1000),
 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
@@ -99,6 +98,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
 	INTEL_E1000_ETHERNET_DEVICE(0x1098),
 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
 	INTEL_E1000_ETHERNET_DEVICE(0x109A),
+	INTEL_E1000_ETHERNET_DEVICE(0x10A4),
 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
 	INTEL_E1000_ETHERNET_DEVICE(0x10B9),
 	INTEL_E1000_ETHERNET_DEVICE(0x10BA),
@@ -245,7 +245,7 @@ e1000_init_module(void)
 
 	printk(KERN_INFO "%s\n", e1000_copyright);
 
-	ret = pci_module_init(&e1000_driver);
+	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
 }
@@ -485,7 +485,7 @@ e1000_up(struct e1000_adapter *adapter)
  *
  **/
 
-static void e1000_power_up_phy(struct e1000_adapter *adapter)
+void e1000_power_up_phy(struct e1000_adapter *adapter)
 {
 	uint16_t mii_reg = 0;
 
@@ -682,9 +682,9 @@ e1000_probe(struct pci_dev *pdev,
 	unsigned long flash_start, flash_len;
 
 	static int cards_found = 0;
-	static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
+	static int global_quad_port_a = 0; /* global ksp3 port a indication */
 	int i, err, pci_using_dac;
-	uint16_t eeprom_data;
+	uint16_t eeprom_data = 0;
 	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
 	if ((err = pci_enable_device(pdev)))
 		return err;
@@ -696,21 +696,20 @@ e1000_probe(struct pci_dev *pdev,
 		if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
 		    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
 			E1000_ERR("No usable DMA configuration, aborting\n");
-			return err;
+			goto err_dma;
 		}
 		pci_using_dac = 0;
 	}
 
 	if ((err = pci_request_regions(pdev, e1000_driver_name)))
-		return err;
+		goto err_pci_reg;
 
 	pci_set_master(pdev);
 
+	err = -ENOMEM;
 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
-	if (!netdev) {
-		err = -ENOMEM;
+	if (!netdev)
 		goto err_alloc_etherdev;
-	}
 
 	SET_MODULE_OWNER(netdev);
 	SET_NETDEV_DEV(netdev, &pdev->dev);
@@ -725,11 +724,10 @@ e1000_probe(struct pci_dev *pdev,
 	mmio_start = pci_resource_start(pdev, BAR_0);
 	mmio_len = pci_resource_len(pdev, BAR_0);
 
+	err = -EIO;
 	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
-	if (!adapter->hw.hw_addr) {
-		err = -EIO;
+	if (!adapter->hw.hw_addr)
 		goto err_ioremap;
-	}
 
 	for (i = BAR_1; i <= BAR_5; i++) {
 		if (pci_resource_len(pdev, i) == 0)
@@ -774,6 +772,7 @@ e1000_probe(struct pci_dev *pdev,
 	if ((err = e1000_sw_init(adapter)))
 		goto err_sw_init;
 
+	err = -EIO;
 	/* Flash BAR mapping must happen after e1000_sw_init
 	 * because it depends on mac_type */
 	if ((adapter->hw.mac_type == e1000_ich8lan) &&
@@ -781,24 +780,13 @@ e1000_probe(struct pci_dev *pdev,
 		flash_start = pci_resource_start(pdev, 1);
 		flash_len = pci_resource_len(pdev, 1);
 		adapter->hw.flash_address = ioremap(flash_start, flash_len);
-		if (!adapter->hw.flash_address) {
-			err = -EIO;
+		if (!adapter->hw.flash_address)
 			goto err_flashmap;
-		}
 	}
 
-	if ((err = e1000_check_phy_reset_block(&adapter->hw)))
+	if (e1000_check_phy_reset_block(&adapter->hw))
 		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
-	/* if ksp3, indicate if it's port a being setup */
-	if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
-	    e1000_ksp3_port_a == 0)
-		adapter->ksp3_port_a = 1;
-	e1000_ksp3_port_a++;
-	/* Reset for multiple KP3 adapters */
-	if (e1000_ksp3_port_a == 4)
-		e1000_ksp3_port_a = 0;
-
 	if (adapter->hw.mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
 				   NETIF_F_HW_CSUM |
@@ -830,7 +818,7 @@ e1000_probe(struct pci_dev *pdev,
 
 	if (e1000_init_eeprom_params(&adapter->hw)) {
 		E1000_ERR("EEPROM initialization failed\n");
-		return -EIO;
+		goto err_eeprom;
 	}
 
 	/* before reading the EEPROM, reset the controller to
@@ -842,7 +830,6 @@ e1000_probe(struct pci_dev *pdev,
 
 	if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
 		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
-		err = -EIO;
 		goto err_eeprom;
 	}
 
@@ -855,12 +842,9 @@ e1000_probe(struct pci_dev *pdev,
 
 	if (!is_valid_ether_addr(netdev->perm_addr)) {
 		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
-		err = -EIO;
 		goto err_eeprom;
 	}
 
-	e1000_read_part_num(&adapter->hw, &(adapter->part_num));
-
 	e1000_get_bus_info(&adapter->hw);
 
 	init_timer(&adapter->tx_fifo_stall_timer);
@@ -921,7 +905,38 @@ e1000_probe(struct pci_dev *pdev,
 		break;
 	}
 	if (eeprom_data & eeprom_apme_mask)
-		adapter->wol |= E1000_WUFC_MAG;
+		adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+	/* now that we have the eeprom settings, apply the special cases
+	 * where the eeprom may be wrong or the board simply won't support
+	 * wake on lan on a particular port */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82546GB_PCIE:
+		adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+		/* Wake events only supported on port A for dual fiber
+		 * regardless of eeprom setting */
+		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
+			adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+		/* if quad port adapter, disable WoL on all but port A */
+		if (global_quad_port_a != 0)
+			adapter->eeprom_wol = 0;
+		else
+			adapter->quad_port_a = 1;
+		/* Reset for multiple quad port adapters */
+		if (++global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	}
+
+	/* initialize the wol settings based on the eeprom settings */
+	adapter->wol = adapter->eeprom_wol;
 
 	/* print bus type/speed/width info */
 	{
@@ -964,16 +979,33 @@ e1000_probe(struct pci_dev *pdev,
 	return 0;
 
 err_register:
+	e1000_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_phy_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
 	if (adapter->hw.flash_address)
 		iounmap(adapter->hw.flash_address);
 err_flashmap:
+#ifdef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		dev_put(&adapter->polling_netdev[i]);
+#endif
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+#ifdef CONFIG_E1000_NAPI
+	kfree(adapter->polling_netdev);
+#endif
 err_sw_init:
-err_eeprom:
 	iounmap(adapter->hw.hw_addr);
 err_ioremap:
 	free_netdev(netdev);
err_alloc_etherdev:
 	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
 	return err;
 }
 
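The probe rework above converts scattered early returns into the kernel's goto-unwind idiom: each acquired resource gets a label in the error path, and a failure at any step jumps to the label that releases everything acquired so far, in reverse order. A compressed, hypothetical skeleton of the shape (not the driver's actual probe):

#include <linux/etherdevice.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	struct net_device *netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		goto err_dma;

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_pci_reg;

	err = -ENOMEM;	/* preset once for the allocation below */
	netdev = alloc_etherdev(sizeof(unsigned long));
	if (!netdev)
		goto err_alloc_etherdev;

	/* ... further setup would continue here ... */
	return 0;

	/* Unwind in exactly the reverse order of acquisition. */
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

Presetting err before a run of allocations, as the patch does with -ENOMEM and -EIO, keeps each failure branch down to a single goto.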
@@ -1208,7 +1240,7 @@ e1000_open(struct net_device *netdev)
 
 	err = e1000_request_irq(adapter);
 	if (err)
-		goto err_up;
+		goto err_req_irq;
 
 	e1000_power_up_phy(adapter);
 
@@ -1229,6 +1261,9 @@ e1000_open(struct net_device *netdev)
 	return E1000_SUCCESS;
 
 err_up:
+	e1000_power_down_phy(adapter);
+	e1000_free_irq(adapter);
+err_req_irq:
 	e1000_free_all_rx_resources(adapter);
 err_setup_rx:
 	e1000_free_all_tx_resources(adapter);
@@ -1381,10 +1416,6 @@ setup_tx_desc_die:
  * (Descriptors) for all queues
  * @adapter: board private structure
  *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
  * Return 0 on success, negative on failure
  **/
 
@@ -1398,6 +1429,9 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
 		if (err) {
 			DPRINTK(PROBE, ERR,
 				"Allocation for Tx Queue %u failed\n", i);
+			for (i-- ; i >= 0; i--)
+				e1000_free_tx_resources(adapter,
+							&adapter->tx_ring[i]);
 			break;
 		}
 	}
@@ -1433,8 +1467,8 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
 		E1000_WRITE_REG(hw, TDT, 0);
 		E1000_WRITE_REG(hw, TDH, 0);
-		adapter->tx_ring[0].tdh = E1000_TDH;
-		adapter->tx_ring[0].tdt = E1000_TDT;
+		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
+		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
 		break;
 	}
 
@@ -1499,8 +1533,6 @@ e1000_configure_tx(struct e1000_adapter *adapter)
 	} else if (hw->mac_type == e1000_80003es2lan) {
 		tarc = E1000_READ_REG(hw, TARC0);
 		tarc |= 1;
-		if (hw->media_type == e1000_media_type_internal_serdes)
-			tarc |= (1 << 20);
 		E1000_WRITE_REG(hw, TARC0, tarc);
 		tarc = E1000_READ_REG(hw, TARC1);
 		tarc |= 1;
@@ -1639,10 +1671,6 @@ setup_rx_desc_die:
  * (Descriptors) for all queues
  * @adapter: board private structure
  *
- * If this function returns with an error, then it's possible one or
- * more of the rings is populated (while the rest are not). It is the
- * callers duty to clean those orphaned rings.
- *
  * Return 0 on success, negative on failure
  **/
 
@@ -1656,6 +1684,9 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
 		if (err) {
 			DPRINTK(PROBE, ERR,
 				"Allocation for Rx Queue %u failed\n", i);
+			for (i-- ; i >= 0; i--)
+				e1000_free_rx_resources(adapter,
+							&adapter->rx_ring[i]);
 			break;
 		}
 	}
@@ -1840,8 +1871,8 @@ e1000_configure_rx(struct e1000_adapter *adapter)
 		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
 		E1000_WRITE_REG(hw, RDT, 0);
 		E1000_WRITE_REG(hw, RDH, 0);
-		adapter->rx_ring[0].rdh = E1000_RDH;
-		adapter->rx_ring[0].rdt = E1000_RDT;
+		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
+		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
 		break;
 	}
 
@@ -2442,10 +2473,9 @@ e1000_watchdog(unsigned long data)
 			 * disable receives in the ISR and
 			 * reset device here in the watchdog
 			 */
-			if (adapter->hw.mac_type == e1000_80003es2lan) {
+			if (adapter->hw.mac_type == e1000_80003es2lan)
 				/* reset device */
 				schedule_work(&adapter->reset_task);
-			}
 		}
 
 		e1000_smartspeed(adapter);
@@ -2545,7 +2575,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
 		cmd_length = E1000_TXD_CMD_IP;
 		ipcse = skb->h.raw - skb->data - 1;
 #ifdef NETIF_F_TSO_IPV6
-	} else if (skb->protocol == ntohs(ETH_P_IPV6)) {
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
 		skb->nh.ipv6h->payload_len = 0;
 		skb->h.th->check =
 			~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
@@ -3127,7 +3157,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
 		break;
 	}
 
-	/* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
 	 * larger slab size
 	 * i.e. RXBUFFER_2048 --> size-4096 slab */
@@ -3680,7 +3710,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 				E1000_DBG("%s: Receive packet consumed multiple"
 					  " buffers\n", netdev->name);
 				/* recycle */
-				buffer_info-> skb = skb;
+				buffer_info->skb = skb;
 				goto next_desc;
 			}
 
@@ -3708,10 +3738,9 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
 #define E1000_CB_LENGTH 256
 		if (length < E1000_CB_LENGTH) {
 			struct sk_buff *new_skb =
-				dev_alloc_skb(length + NET_IP_ALIGN);
+				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
 			if (new_skb) {
 				skb_reserve(new_skb, NET_IP_ALIGN);
-				new_skb->dev = netdev;
 				memcpy(new_skb->data - NET_IP_ALIGN,
 				       skb->data - NET_IP_ALIGN,
 				       length + NET_IP_ALIGN);
@@ -3978,13 +4007,13 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 	buffer_info = &rx_ring->buffer_info[i];
 
 	while (cleaned_count--) {
-		if (!(skb = buffer_info->skb))
-			skb = dev_alloc_skb(bufsz);
-		else {
+		skb = buffer_info->skb;
+		if (skb) {
 			skb_trim(skb, 0);
 			goto map_skb;
 		}
 
+		skb = netdev_alloc_skb(netdev, bufsz);
 		if (unlikely(!skb)) {
 			/* Better luck next round */
 			adapter->alloc_rx_buff_failed++;
@@ -3997,7 +4026,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
 				"at %p\n", bufsz, skb->data);
 			/* Try again, without freeing the previous */
-			skb = dev_alloc_skb(bufsz);
+			skb = netdev_alloc_skb(netdev, bufsz);
 			/* Failed allocation, critical failure */
 			if (!skb) {
 				dev_kfree_skb(oldskb);
@@ -4009,10 +4038,10 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 				dev_kfree_skb(skb);
 				dev_kfree_skb(oldskb);
 				break; /* while !buffer_info->skb */
-			} else {
-				/* Use new allocation */
-				dev_kfree_skb(oldskb);
 			}
+
+			/* Use new allocation */
+			dev_kfree_skb(oldskb);
 		}
 		/* Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
@@ -4020,8 +4049,6 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 		 */
 		skb_reserve(skb, NET_IP_ALIGN);
 
-		skb->dev = netdev;
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
 map_skb:
@@ -4121,7 +4148,8 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 			rx_desc->read.buffer_addr[j+1] = ~0;
 		}
 
-		skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
+		skb = netdev_alloc_skb(netdev,
+				       adapter->rx_ps_bsize0 + NET_IP_ALIGN);
 
 		if (unlikely(!skb)) {
 			adapter->alloc_rx_buff_failed++;
@@ -4134,8 +4162,6 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 		 */
 		skb_reserve(skb, NET_IP_ALIGN);
 
-		skb->dev = netdev;
-
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_ps_bsize0;
 		buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -4385,11 +4411,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
 	pci_write_config_word(adapter->pdev, reg, *value);
 }
 
+#if 0
 uint32_t
 e1000_io_read(struct e1000_hw *hw, unsigned long port)
 {
 	return inl(port);
 }
+#endif /* 0 */
 
 void
 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
@@ -4625,7 +4653,7 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 		e1000_set_multi(netdev);
 
 		/* turn on all-multi mode if wake on multicast is enabled */
-		if (adapter->wol & E1000_WUFC_MC) {
+		if (wufc & E1000_WUFC_MC) {
 			rctl = E1000_READ_REG(&adapter->hw, RCTL);
 			rctl |= E1000_RCTL_MPE;
 			E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
@@ -4697,11 +4725,14 @@ e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	uint32_t manc, ret_val;
+	uint32_t manc, err;
 
 	pci_set_power_state(pdev, PCI_D0);
 	e1000_pci_restore_state(adapter);
-	ret_val = pci_enable_device(pdev);
+	if ((err = pci_enable_device(pdev))) {
+		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+		return err;
+	}
 	pci_set_master(pdev);
 
 	pci_enable_wake(pdev, PCI_D3hot, 0);
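A change repeated through the receive paths above is dev_alloc_skb() giving way to netdev_alloc_skb(): the latter takes the net_device and associates the freshly allocated skb with it, which is why the explicit skb->dev = netdev assignments disappear. A small sketch of the replacement pattern; the helper name is illustrative:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_rx_alloc(struct net_device *netdev,
					unsigned int bufsz)
{
	/* netdev_alloc_skb() ties the skb to netdev itself, so no
	 * separate "skb->dev = netdev;" assignment is needed. */
	struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN);

	if (!skb)
		return NULL;

	/* Keep the IP header aligned after the 14-byte Ethernet header. */
	skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}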
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 0ef413172c68..212842738972 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -324,7 +324,6 @@ e1000_check_options(struct e1000_adapter *adapter)
 		DPRINTK(PROBE, NOTICE,
 			"Warning: no configuration for board #%i\n", bd);
 		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
-		bd = E1000_MAX_NIC;
 	}
 
 	{ /* Transmit Descriptor Count */
@@ -342,9 +341,14 @@ e1000_check_options(struct e1000_adapter *adapter)
 		opt.arg.r.max = mac_type < e1000_82544 ?
 			E1000_MAX_TXD : E1000_MAX_82544_TXD;
 
-		tx_ring->count = TxDescriptors[bd];
-		e1000_validate_option(&tx_ring->count, &opt, adapter);
-		E1000_ROUNDUP(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+		if (num_TxDescriptors > bd) {
+			tx_ring->count = TxDescriptors[bd];
+			e1000_validate_option(&tx_ring->count, &opt, adapter);
+			E1000_ROUNDUP(tx_ring->count,
+				      REQ_TX_DESCRIPTOR_MULTIPLE);
+		} else {
+			tx_ring->count = opt.def;
+		}
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			tx_ring[i].count = tx_ring->count;
 	}
@@ -363,9 +367,14 @@ e1000_check_options(struct e1000_adapter *adapter)
 		opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
 			E1000_MAX_82544_RXD;
 
-		rx_ring->count = RxDescriptors[bd];
-		e1000_validate_option(&rx_ring->count, &opt, adapter);
-		E1000_ROUNDUP(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+		if (num_RxDescriptors > bd) {
+			rx_ring->count = RxDescriptors[bd];
+			e1000_validate_option(&rx_ring->count, &opt, adapter);
+			E1000_ROUNDUP(rx_ring->count,
+				      REQ_RX_DESCRIPTOR_MULTIPLE);
+		} else {
+			rx_ring->count = opt.def;
+		}
 		for (i = 0; i < adapter->num_rx_queues; i++)
 			rx_ring[i].count = rx_ring->count;
 	}
@@ -377,9 +386,13 @@ e1000_check_options(struct e1000_adapter *adapter)
 			.def = OPTION_ENABLED
 		};
 
-		int rx_csum = XsumRX[bd];
-		e1000_validate_option(&rx_csum, &opt, adapter);
-		adapter->rx_csum = rx_csum;
+		if (num_XsumRX > bd) {
+			int rx_csum = XsumRX[bd];
+			e1000_validate_option(&rx_csum, &opt, adapter);
+			adapter->rx_csum = rx_csum;
+		} else {
+			adapter->rx_csum = opt.def;
+		}
 	}
 	{ /* Flow Control */
 
@@ -399,9 +412,13 @@ e1000_check_options(struct e1000_adapter *adapter)
 			             .p = fc_list }}
 		};
 
-		int fc = FlowControl[bd];
-		e1000_validate_option(&fc, &opt, adapter);
-		adapter->hw.fc = adapter->hw.original_fc = fc;
+		if (num_FlowControl > bd) {
+			int fc = FlowControl[bd];
+			e1000_validate_option(&fc, &opt, adapter);
+			adapter->hw.fc = adapter->hw.original_fc = fc;
+		} else {
+			adapter->hw.fc = adapter->hw.original_fc = opt.def;
+		}
 	}
 	{ /* Transmit Interrupt Delay */
 		struct e1000_option opt = {
@@ -413,8 +430,13 @@ e1000_check_options(struct e1000_adapter *adapter)
 			             .max = MAX_TXDELAY }}
 		};
 
-		adapter->tx_int_delay = TxIntDelay[bd];
-		e1000_validate_option(&adapter->tx_int_delay, &opt, adapter);
+		if (num_TxIntDelay > bd) {
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
 	}
 	{ /* Transmit Absolute Interrupt Delay */
 		struct e1000_option opt = {
@@ -426,9 +448,13 @@ e1000_check_options(struct e1000_adapter *adapter)
 			             .max = MAX_TXABSDELAY }}
 		};
 
-		adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
-		e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
-		                      adapter);
+		if (num_TxAbsIntDelay > bd) {
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
 	}
 	{ /* Receive Interrupt Delay */
 		struct e1000_option opt = {
@@ -440,8 +466,13 @@ e1000_check_options(struct e1000_adapter *adapter)
 			             .max = MAX_RXDELAY }}
 		};
 
-		adapter->rx_int_delay = RxIntDelay[bd];
-		e1000_validate_option(&adapter->rx_int_delay, &opt, adapter);
+		if (num_RxIntDelay > bd) {
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
 	}
 	{ /* Receive Absolute Interrupt Delay */
 		struct e1000_option opt = {
@@ -453,9 +484,13 @@ e1000_check_options(struct e1000_adapter *adapter)
 			             .max = MAX_RXABSDELAY }}
 		};
 
-		adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
-		e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
-		                      adapter);
+		if (num_RxAbsIntDelay > bd) {
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
 	}
 	{ /* Interrupt Throttling Rate */
 		struct e1000_option opt = {
@@ -467,18 +502,24 @@ e1000_check_options(struct e1000_adapter *adapter)
 			             .max = MAX_ITR }}
 		};
 
-		adapter->itr = InterruptThrottleRate[bd];
-		switch (adapter->itr) {
-		case 0:
-			DPRINTK(PROBE, INFO, "%s turned off\n", opt.name);
-			break;
-		case 1:
-			DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
-				opt.name);
-			break;
-		default:
-			e1000_validate_option(&adapter->itr, &opt, adapter);
-			break;
+		if (num_InterruptThrottleRate > bd) {
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+					opt.name);
+				break;
+			case 1:
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+					opt.name);
+				break;
+			default:
+				e1000_validate_option(&adapter->itr, &opt,
						      adapter);
+				break;
+			}
+		} else {
+			adapter->itr = opt.def;
 		}
 	}
 	{ /* Smart Power Down */
@@ -489,9 +530,13 @@ e1000_check_options(struct e1000_adapter *adapter)
 			.def = OPTION_DISABLED
 		};
 
-		int spd = SmartPowerDownEnable[bd];
-		e1000_validate_option(&spd, &opt, adapter);
-		adapter->smart_power_down = spd;
+		if (num_SmartPowerDownEnable > bd) {
+			int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			adapter->smart_power_down = spd;
+		} else {
+			adapter->smart_power_down = opt.def;
+		}
 	}
 	{ /* Kumeran Lock Loss Workaround */
 		struct e1000_option opt = {
@@ -501,9 +546,13 @@ e1000_check_options(struct e1000_adapter *adapter)
 			.def = OPTION_ENABLED
 		};
 
+		if (num_KumeranLockLoss > bd) {
 		int kmrn_lock_loss = KumeranLockLoss[bd];
 		e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
 		adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+		} else {
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+		}
 	}
 
 	switch (adapter->hw.media_type) {
@@ -530,18 +579,17 @@ static void __devinit
 e1000_check_fiber_options(struct e1000_adapter *adapter)
 {
 	int bd = adapter->bd_number;
-	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
-	if ((Speed[bd] != OPTION_UNSET)) {
+	if (num_Speed > bd) {
 		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
 			"parameter ignored\n");
 	}
 
-	if ((Duplex[bd] != OPTION_UNSET)) {
+	if (num_Duplex > bd) {
 		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
 			"parameter ignored\n");
 	}
 
-	if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
 		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
 			"not valid for fiber adapters, "
 			"parameter ignored\n");
@@ -560,7 +608,6 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
 {
 	int speed, dplx, an;
 	int bd = adapter->bd_number;
-	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
 
 	{ /* Speed */
 		struct e1000_opt_list speed_list[] = {{ 0, "" },
@@ -577,8 +624,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
 			             .p = speed_list }}
 		};
 
-		speed = Speed[bd];
-		e1000_validate_option(&speed, &opt, adapter);
+		if (num_Speed > bd) {
+			speed = Speed[bd];
+			e1000_validate_option(&speed, &opt, adapter);
+		} else {
+			speed = opt.def;
+		}
 	}
 	{ /* Duplex */
 		struct e1000_opt_list dplx_list[] = {{ 0, "" },
@@ -600,11 +651,15 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
 				"Speed/Duplex/AutoNeg parameter ignored.\n");
 			return;
 		}
-		dplx = Duplex[bd];
-		e1000_validate_option(&dplx, &opt, adapter);
+		if (num_Duplex > bd) {
+			dplx = Duplex[bd];
+			e1000_validate_option(&dplx, &opt, adapter);
+		} else {
+			dplx = opt.def;
+		}
 	}
 
-	if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
 		DPRINTK(PROBE, INFO,
 			"AutoNeg specified along with Speed or Duplex, "
 			"parameter ignored\n");
@@ -653,15 +708,19 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
 			             .p = an_list }}
 		};
 
-		an = AutoNeg[bd];
-		e1000_validate_option(&an, &opt, adapter);
+		if (num_AutoNeg > bd) {
+			an = AutoNeg[bd];
+			e1000_validate_option(&an, &opt, adapter);
+		} else {
+			an = opt.def;
+		}
 		adapter->hw.autoneg_advertised = an;
 	}
 
 	switch (speed + dplx) {
 	case 0:
 		adapter->hw.autoneg = adapter->fc_autoneg = 1;
-		if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
 			DPRINTK(PROBE, INFO,
 				"Speed and duplex autonegotiation enabled\n");
 		break;
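Every option block above gains the same guard: X[bd] is consulted only when num_X, the count that module_param_array() fills in, shows the user actually supplied a value for board bd; otherwise the option's default applies. That is what lets the patch drop the OPTION_UNSET sentinel checks and the bd clamping. A self-contained sketch of the mechanism, with placeholder names and values:

#include <linux/module.h>

#define EXAMPLE_MAX_NIC	32
#define EXAMPLE_DEF_TXD	256

static int TxDescriptors[EXAMPLE_MAX_NIC + 1];
static int num_TxDescriptors;
module_param_array(TxDescriptors, int, &num_TxDescriptors, 0);

static int example_txd_for_board(int bd)
{
	/* Trust TxDescriptors[bd] only if the user passed at least
	 * bd + 1 values on the module command line. */
	if (num_TxDescriptors > bd)
		return TxDescriptors[bd];
	return EXAMPLE_DEF_TXD;
}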
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index e5c5cd2a2712..e4e733a380e3 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -425,8 +425,8 @@ MODULE_LICENSE("GPL");
 
 /* This is set up so that only a single autoprobe takes place per call.
 ISA device autoprobes on a running machine are not recommended. */
-int
-init_module(void)
+
+int __init init_module(void)
 {
 	struct net_device *dev;
 	int this_dev, found = 0;
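The ISA drivers touched here (e2100, eepro, eexpress) all receive the same one-line change: the legacy init_module() entry point is tagged __init, letting the kernel free its text once module initialization has run. In sketch form, assuming a module with nothing else to set up:

#include <linux/init.h>
#include <linux/module.h>

/* A bare "int init_module(void)" would stay resident for the module's
 * whole lifetime; __init marks it discardable after load. */
int __init init_module(void)
{
	return 0;
}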
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 20d31430c74f..bf9efa75390f 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -154,7 +154,7 @@ static const char version[] =
 #include <asm/dma.h>
 
 #define DRV_NAME "eepro"
-#define DRV_VERSION "0.13b"
+#define DRV_VERSION "0.13c"
 
 #define compat_dev_kfree_skb( skb, mode ) dev_kfree_skb( (skb) )
 /* I had reports of looong delays with SLOW_DOWN defined as udelay(2) */
@@ -1333,7 +1333,6 @@ set_multicast_list(struct net_device *dev)
 		mode = inb(ioaddr + REG3);
 		outb(mode, ioaddr + REG3); /* writing reg. 3 to complete the update */
 		eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
-		printk(KERN_INFO "%s: promiscuous mode enabled.\n", dev->name);
 	}
 
 	else if (dev->mc_count==0 )
@@ -1807,8 +1806,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
 MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
 MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
 
-int
-init_module(void)
+int __init init_module(void)
 {
 	struct net_device *dev;
 	int i;
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index e445988c92ee..a3d515def109 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -2385,7 +2385,7 @@ static int __init eepro100_init_module(void)
2385#ifdef MODULE 2385#ifdef MODULE
2386 printk(version); 2386 printk(version);
2387#endif 2387#endif
2388 return pci_module_init(&eepro100_driver); 2388 return pci_register_driver(&eepro100_driver);
2389} 2389}
2390 2390
2391static void __exit eepro100_cleanup_module(void) 2391static void __exit eepro100_cleanup_module(void)
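
pci_module_init() was a compatibility wrapper that had long since become a synonym for pci_register_driver(); this series (eepro100 here, epic100 and fealnx below) converts the remaining callers to the real API. A minimal sketch of the registration boilerplate, with placeholder IDs, names and probe logic:

/* Sketch of plain pci_register_driver() usage; the vendor/device
 * IDs and driver name are placeholders. */
#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id ex_ids[] = {
        { PCI_DEVICE(0x10de, 0x0056) }, /* placeholder IDs */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, ex_ids);

static int ex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return pci_enable_device(pdev);
}

static void ex_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);
}

static struct pci_driver ex_driver = {
        .name     = "ex",
        .id_table = ex_ids,
        .probe    = ex_probe,
        .remove   = ex_remove,
};

static int __init ex_init(void)
{
        return pci_register_driver(&ex_driver);
}

static void __exit ex_exit(void)
{
        pci_unregister_driver(&ex_driver);
}

module_init(ex_init);
module_exit(ex_exit);
MODULE_LICENSE("GPL");
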
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 33291bcf6d4c..0701c1d810ca 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL");
1698 * are specified, we verify and then use them. If no parameters are given, we 1698 * are specified, we verify and then use them. If no parameters are given, we
1699 * autoprobe for one card only. 1699 * autoprobe for one card only.
1700 */ 1700 */
1701int init_module(void) 1701int __init init_module(void)
1702{ 1702{
1703 struct net_device *dev; 1703 struct net_device *dev;
1704 int this_dev, found = 0; 1704 int this_dev, found = 0;
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index a67650ccf084..b885b2029b49 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -26,8 +26,8 @@
26*/ 26*/
27 27
28#define DRV_NAME "epic100" 28#define DRV_NAME "epic100"
29#define DRV_VERSION "2.0" 29#define DRV_VERSION "2.1"
30#define DRV_RELDATE "June 27, 2006" 30#define DRV_RELDATE "Sept 11, 2006"
31 31
32/* The user-configurable values. 32/* The user-configurable values.
33 These may be modified when a driver module is loaded.*/ 33 These may be modified when a driver module is loaded.*/
@@ -1386,7 +1386,6 @@ static void set_rx_mode(struct net_device *dev)
1386 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1386 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1387 outl(0x002C, ioaddr + RxCtrl); 1387 outl(0x002C, ioaddr + RxCtrl);
1388 /* Unconditionally log net taps. */ 1388 /* Unconditionally log net taps. */
1389 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1390 memset(mc_filter, 0xff, sizeof(mc_filter)); 1389 memset(mc_filter, 0xff, sizeof(mc_filter));
1391 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) { 1390 } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
1392 /* There is apparently a chip bug, so the multicast filter 1391 /* There is apparently a chip bug, so the multicast filter
@@ -1604,7 +1603,7 @@ static int __init epic_init (void)
1604 version, version2, version3); 1603 version, version2, version3);
1605#endif 1604#endif
1606 1605
1607 return pci_module_init (&epic_driver); 1606 return pci_register_driver(&epic_driver);
1608} 1607}
1609 1608
1610 1609
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 6b0ab1eac3fb..fd7b32a24ea4 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
421MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); 421MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
422MODULE_LICENSE("GPL"); 422MODULE_LICENSE("GPL");
423 423
424int 424int __init init_module(void)
425init_module(void)
426{ 425{
427 struct net_device *dev; 426 struct net_device *dev;
428 int this_dev, found = 0; 427 int this_dev, found = 0;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 4bf76f86d8e9..ca42efa9143c 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,
1434module_param(debug, int, 0); 1434module_param(debug, int, 0);
1435MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); 1435MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
1436 1436
1437int init_module(void) 1437int __init init_module(void)
1438{ 1438{
1439 int this_dev, found = 0; 1439 int this_dev, found = 0;
1440 struct net_device *dev; 1440 struct net_device *dev;
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 97d34fee8c1f..56f81a2bec5b 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -25,8 +25,8 @@
25*/ 25*/
26 26
27#define DRV_NAME "fealnx" 27#define DRV_NAME "fealnx"
28#define DRV_VERSION "2.51" 28#define DRV_VERSION "2.52"
29#define DRV_RELDATE "Nov-17-2001" 29#define DRV_RELDATE "Sep-11-2006"
30 30
31static int debug; /* 1-> print debug message */ 31static int debug; /* 1-> print debug message */
32static int max_interrupt_work = 20; 32static int max_interrupt_work = 20;
@@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
92#include <asm/uaccess.h> 92#include <asm/uaccess.h>
93 93
94/* These identify the driver base version and may not be removed. */ 94/* These identify the driver base version and may not be removed. */
95static char version[] __devinitdata = 95static char version[] =
96KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; 96KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
97 97
98 98
@@ -1800,8 +1800,6 @@ static void __set_rx_mode(struct net_device *dev)
1800 u32 rx_mode; 1800 u32 rx_mode;
1801 1801
1802 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1802 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1803 /* Unconditionally log net taps. */
1804 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1805 memset(mc_filter, 0xff, sizeof(mc_filter)); 1803 memset(mc_filter, 0xff, sizeof(mc_filter));
1806 rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM; 1804 rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
1807 } else if ((dev->mc_count > multicast_filter_limit) 1805 } else if ((dev->mc_count > multicast_filter_limit)
@@ -1984,7 +1982,7 @@ static int __init fealnx_init(void)
1984 printk(version); 1982 printk(version);
1985#endif 1983#endif
1986 1984
1987 return pci_module_init(&fealnx_driver); 1985 return pci_register_driver(&fealnx_driver);
1988} 1986}
1989 1987
1990static void __exit fealnx_exit(void) 1988static void __exit fealnx_exit(void)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 9b4030031744..9eedb27dd695 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -2227,8 +2227,6 @@ static void set_multicast_list(struct net_device *dev)
2227 ep = fep->hwp; 2227 ep = fep->hwp;
2228 2228
2229 if (dev->flags&IFF_PROMISC) { 2229 if (dev->flags&IFF_PROMISC) {
2230 /* Log any net taps. */
2231 printk("%s: Promiscuous mode enabled.\n", dev->name);
2232 ep->fec_r_cntrl |= 0x0008; 2230 ep->fec_r_cntrl |= 0x0008;
2233 } else { 2231 } else {
2234 2232
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11b8f1b43dd5..59f9a515c07c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -109,6 +109,7 @@
109 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup. 109 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
110 * 0.55: 22 Mar 2006: Add flow control (pause frame). 110 * 0.55: 22 Mar 2006: Add flow control (pause frame).
111 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support. 111 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
112 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
112 * 113 *
113 * Known bugs: 114 * Known bugs:
114 * We suspect that on some hardware no TX done interrupts are generated. 115 * We suspect that on some hardware no TX done interrupts are generated.
@@ -120,7 +121,12 @@
120 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 121 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
121 * superfluous timer interrupts from the nic. 122 * superfluous timer interrupts from the nic.
122 */ 123 */
123#define FORCEDETH_VERSION "0.56" 124#ifdef CONFIG_FORCEDETH_NAPI
125#define DRIVERNAPI "-NAPI"
126#else
127#define DRIVERNAPI
128#endif
129#define FORCEDETH_VERSION "0.57"
124#define DRV_NAME "forcedeth" 130#define DRV_NAME "forcedeth"
125 131
126#include <linux/module.h> 132#include <linux/module.h>
@@ -262,7 +268,8 @@ enum {
262 NvRegRingSizes = 0x108, 268 NvRegRingSizes = 0x108,
263#define NVREG_RINGSZ_TXSHIFT 0 269#define NVREG_RINGSZ_TXSHIFT 0
264#define NVREG_RINGSZ_RXSHIFT 16 270#define NVREG_RINGSZ_RXSHIFT 16
265 NvRegUnknownTransmitterReg = 0x10c, 271 NvRegTransmitPoll = 0x10c,
272#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
266 NvRegLinkSpeed = 0x110, 273 NvRegLinkSpeed = 0x110,
267#define NVREG_LINKSPEED_FORCE 0x10000 274#define NVREG_LINKSPEED_FORCE 0x10000
268#define NVREG_LINKSPEED_10 1000 275#define NVREG_LINKSPEED_10 1000
@@ -381,21 +388,21 @@ enum {
381 388
382/* Big endian: should work, but is untested */ 389/* Big endian: should work, but is untested */
383struct ring_desc { 390struct ring_desc {
384 u32 PacketBuffer; 391 __le32 buf;
385 u32 FlagLen; 392 __le32 flaglen;
386}; 393};
387 394
388struct ring_desc_ex { 395struct ring_desc_ex {
389 u32 PacketBufferHigh; 396 __le32 bufhigh;
390 u32 PacketBufferLow; 397 __le32 buflow;
391 u32 TxVlan; 398 __le32 txvlan;
392 u32 FlagLen; 399 __le32 flaglen;
393}; 400};
394 401
395typedef union _ring_type { 402union ring_type {
396 struct ring_desc* orig; 403 struct ring_desc* orig;
397 struct ring_desc_ex* ex; 404 struct ring_desc_ex* ex;
398} ring_type; 405};
399 406
400#define FLAG_MASK_V1 0xffff0000 407#define FLAG_MASK_V1 0xffff0000
401#define FLAG_MASK_V2 0xffffc000 408#define FLAG_MASK_V2 0xffffc000
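
The descriptor fields above switch from plain u32 to __le32. They live in DMA memory shared with the NIC and are little-endian regardless of the host CPU, so the annotation lets sparse (make C=1) flag any access that is missing a cpu_to_le32()/le32_to_cpu() conversion. A sketch of the idiom with placeholder names (ex_desc, ex_desc_len, ex_desc_arm):

/* Sketch: endian-annotated hardware descriptors. __le32 marks
 * data stored little-endian in memory shared with the device. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct ex_desc {
        __le32 buf;             /* DMA address, as the NIC reads it */
        __le32 flaglen;         /* ownership flags + buffer length */
};

static inline u32 ex_desc_len(const struct ex_desc *d, u32 len_mask)
{
        return le32_to_cpu(d->flaglen) & len_mask;
}

static inline void ex_desc_arm(struct ex_desc *d, u32 dma, u32 flaglen)
{
        d->buf = cpu_to_le32(dma);
        d->flaglen = cpu_to_le32(flaglen);
}
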
@@ -536,6 +543,9 @@ typedef union _ring_type {
536#define PHYID1_OUI_SHFT 6 543#define PHYID1_OUI_SHFT 6
537#define PHYID2_OUI_MASK 0xfc00 544#define PHYID2_OUI_MASK 0xfc00
538#define PHYID2_OUI_SHFT 10 545#define PHYID2_OUI_SHFT 10
546#define PHYID2_MODEL_MASK 0x03f0
547#define PHY_MODEL_MARVELL_E3016 0x220
548#define PHY_MARVELL_E3016_INITMASK 0x0300
539#define PHY_INIT1 0x0f000 549#define PHY_INIT1 0x0f000
540#define PHY_INIT2 0x0e00 550#define PHY_INIT2 0x0e00
541#define PHY_INIT3 0x01000 551#define PHY_INIT3 0x01000
@@ -653,8 +663,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
653}; 663};
654 664
655struct register_test { 665struct register_test {
656 u32 reg; 666 __le32 reg;
657 u32 mask; 667 __le32 mask;
658}; 668};
659 669
660static const struct register_test nv_registers_test[] = { 670static const struct register_test nv_registers_test[] = {
@@ -694,6 +704,7 @@ struct fe_priv {
694 int phyaddr; 704 int phyaddr;
695 int wolenabled; 705 int wolenabled;
696 unsigned int phy_oui; 706 unsigned int phy_oui;
707 unsigned int phy_model;
697 u16 gigabit; 708 u16 gigabit;
698 int intr_test; 709 int intr_test;
699 710
@@ -707,13 +718,14 @@ struct fe_priv {
707 u32 vlanctl_bits; 718 u32 vlanctl_bits;
708 u32 driver_data; 719 u32 driver_data;
709 u32 register_size; 720 u32 register_size;
721 int rx_csum;
710 722
711 void __iomem *base; 723 void __iomem *base;
712 724
713 /* rx specific fields. 725 /* rx specific fields.
 714	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock); 726	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
715 */ 727 */
716 ring_type rx_ring; 728 union ring_type rx_ring;
717 unsigned int cur_rx, refill_rx; 729 unsigned int cur_rx, refill_rx;
718 struct sk_buff **rx_skbuff; 730 struct sk_buff **rx_skbuff;
719 dma_addr_t *rx_dma; 731 dma_addr_t *rx_dma;
@@ -733,7 +745,7 @@ struct fe_priv {
733 /* 745 /*
734 * tx specific fields. 746 * tx specific fields.
735 */ 747 */
736 ring_type tx_ring; 748 union ring_type tx_ring;
737 unsigned int next_tx, nic_tx; 749 unsigned int next_tx, nic_tx;
738 struct sk_buff **tx_skbuff; 750 struct sk_buff **tx_skbuff;
739 dma_addr_t *tx_dma; 751 dma_addr_t *tx_dma;
@@ -826,13 +838,13 @@ static inline void pci_push(u8 __iomem *base)
826 838
827static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v) 839static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
828{ 840{
829 return le32_to_cpu(prd->FlagLen) 841 return le32_to_cpu(prd->flaglen)
830 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2); 842 & ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
831} 843}
832 844
833static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v) 845static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
834{ 846{
835 return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2; 847 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
836} 848}
837 849
838static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 850static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
@@ -885,7 +897,7 @@ static void free_rings(struct net_device *dev)
885 struct fe_priv *np = get_nvpriv(dev); 897 struct fe_priv *np = get_nvpriv(dev);
886 898
887 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 899 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
888 if(np->rx_ring.orig) 900 if (np->rx_ring.orig)
889 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 901 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
890 np->rx_ring.orig, np->ring_addr); 902 np->rx_ring.orig, np->ring_addr);
891 } else { 903 } else {
@@ -1020,14 +1032,13 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1020 return retval; 1032 return retval;
1021} 1033}
1022 1034
1023static int phy_reset(struct net_device *dev) 1035static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1024{ 1036{
1025 struct fe_priv *np = netdev_priv(dev); 1037 struct fe_priv *np = netdev_priv(dev);
1026 u32 miicontrol; 1038 u32 miicontrol;
1027 unsigned int tries = 0; 1039 unsigned int tries = 0;
1028 1040
1029 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1041 miicontrol = BMCR_RESET | bmcr_setup;
1030 miicontrol |= BMCR_RESET;
1031 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1042 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
1032 return -1; 1043 return -1;
1033 } 1044 }
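
phy_reset() grows a bmcr_setup argument: the BMCR bits the caller wants asserted are written together with BMCR_RESET, because some PHYs (the Marvell parts handled in the hunks below) only latch new settings across a reset. A sketch of the reset-and-poll shape, where mii_read/mii_write are stand-ins for the driver's MDIO accessors:

/* Sketch: PHY reset carrying setup bits through the reset, then
 * polling until the PHY clears BMCR_RESET on its own. */
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/errno.h>

extern u16 mii_read(int phyaddr, int reg);              /* stand-in */
extern void mii_write(int phyaddr, int reg, u16 val);   /* stand-in */

static int ex_phy_reset(int phyaddr, u16 bmcr_setup)
{
        unsigned int tries = 100;

        mii_write(phyaddr, MII_BMCR, BMCR_RESET | bmcr_setup);
        do {
                msleep(10);
                if (!(mii_read(phyaddr, MII_BMCR) & BMCR_RESET))
                        return 0;       /* reset self-cleared, done */
        } while (--tries);
        return -ETIMEDOUT;
}
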
@@ -1052,6 +1063,16 @@ static int phy_init(struct net_device *dev)
1052 u8 __iomem *base = get_hwbase(dev); 1063 u8 __iomem *base = get_hwbase(dev);
1053 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1064 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
1054 1065
1066 /* phy errata for E3016 phy */
1067 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1068 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1069 reg &= ~PHY_MARVELL_E3016_INITMASK;
1070 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1071 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
1072 return PHY_ERROR;
1073 }
1074 }
1075
1055 /* set advertise register */ 1076 /* set advertise register */
1056 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1077 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1057 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1078 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
@@ -1082,8 +1103,13 @@ static int phy_init(struct net_device *dev)
1082 else 1103 else
1083 np->gigabit = 0; 1104 np->gigabit = 0;
1084 1105
1085 /* reset the phy */ 1106 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1086 if (phy_reset(dev)) { 1107 mii_control |= BMCR_ANENABLE;
1108
1109 /* reset the phy
 1110	 * (certain phys need bmcr to be set up with reset)

1111 */
1112 if (phy_reset(dev, mii_control)) {
1087 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1113 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
1088 return PHY_ERROR; 1114 return PHY_ERROR;
1089 } 1115 }
@@ -1178,7 +1204,7 @@ static void nv_stop_tx(struct net_device *dev)
1178 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1204 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
1179 1205
1180 udelay(NV_TXSTOP_DELAY2); 1206 udelay(NV_TXSTOP_DELAY2);
1181 writel(0, base + NvRegUnknownTransmitterReg); 1207 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
1182} 1208}
1183 1209
1184static void nv_txrx_reset(struct net_device *dev) 1210static void nv_txrx_reset(struct net_device *dev)
@@ -1258,14 +1284,14 @@ static int nv_alloc_rx(struct net_device *dev)
1258 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, 1284 np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
1259 skb->end-skb->data, PCI_DMA_FROMDEVICE); 1285 skb->end-skb->data, PCI_DMA_FROMDEVICE);
1260 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1286 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1261 np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]); 1287 np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
1262 wmb(); 1288 wmb();
1263 np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); 1289 np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
1264 } else { 1290 } else {
1265 np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32; 1291 np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
1266 np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF; 1292 np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
1267 wmb(); 1293 wmb();
1268 np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); 1294 np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
1269 } 1295 }
1270 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n", 1296 dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
1271 dev->name, refill_rx); 1297 dev->name, refill_rx);
@@ -1277,6 +1303,16 @@ static int nv_alloc_rx(struct net_device *dev)
1277 return 0; 1303 return 0;
1278} 1304}
1279 1305
 1306/* If rx buffers are exhausted, this is called after 50 ms to attempt a refill */
1307#ifdef CONFIG_FORCEDETH_NAPI
1308static void nv_do_rx_refill(unsigned long data)
1309{
1310 struct net_device *dev = (struct net_device *) data;
1311
1312 /* Just reschedule NAPI rx processing */
1313 netif_rx_schedule(dev);
1314}
1315#else
1280static void nv_do_rx_refill(unsigned long data) 1316static void nv_do_rx_refill(unsigned long data)
1281{ 1317{
1282 struct net_device *dev = (struct net_device *) data; 1318 struct net_device *dev = (struct net_device *) data;
@@ -1305,6 +1341,7 @@ static void nv_do_rx_refill(unsigned long data)
1305 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1341 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1306 } 1342 }
1307} 1343}
1344#endif
1308 1345
1309static void nv_init_rx(struct net_device *dev) 1346static void nv_init_rx(struct net_device *dev)
1310{ 1347{
@@ -1315,9 +1352,9 @@ static void nv_init_rx(struct net_device *dev)
1315 np->refill_rx = 0; 1352 np->refill_rx = 0;
1316 for (i = 0; i < np->rx_ring_size; i++) 1353 for (i = 0; i < np->rx_ring_size; i++)
1317 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1354 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1318 np->rx_ring.orig[i].FlagLen = 0; 1355 np->rx_ring.orig[i].flaglen = 0;
1319 else 1356 else
1320 np->rx_ring.ex[i].FlagLen = 0; 1357 np->rx_ring.ex[i].flaglen = 0;
1321} 1358}
1322 1359
1323static void nv_init_tx(struct net_device *dev) 1360static void nv_init_tx(struct net_device *dev)
@@ -1328,9 +1365,9 @@ static void nv_init_tx(struct net_device *dev)
1328 np->next_tx = np->nic_tx = 0; 1365 np->next_tx = np->nic_tx = 0;
1329 for (i = 0; i < np->tx_ring_size; i++) { 1366 for (i = 0; i < np->tx_ring_size; i++) {
1330 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1367 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1331 np->tx_ring.orig[i].FlagLen = 0; 1368 np->tx_ring.orig[i].flaglen = 0;
1332 else 1369 else
1333 np->tx_ring.ex[i].FlagLen = 0; 1370 np->tx_ring.ex[i].flaglen = 0;
1334 np->tx_skbuff[i] = NULL; 1371 np->tx_skbuff[i] = NULL;
1335 np->tx_dma[i] = 0; 1372 np->tx_dma[i] = 0;
1336 } 1373 }
@@ -1373,9 +1410,9 @@ static void nv_drain_tx(struct net_device *dev)
1373 1410
1374 for (i = 0; i < np->tx_ring_size; i++) { 1411 for (i = 0; i < np->tx_ring_size; i++) {
1375 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1412 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1376 np->tx_ring.orig[i].FlagLen = 0; 1413 np->tx_ring.orig[i].flaglen = 0;
1377 else 1414 else
1378 np->tx_ring.ex[i].FlagLen = 0; 1415 np->tx_ring.ex[i].flaglen = 0;
1379 if (nv_release_txskb(dev, i)) 1416 if (nv_release_txskb(dev, i))
1380 np->stats.tx_dropped++; 1417 np->stats.tx_dropped++;
1381 } 1418 }
@@ -1387,9 +1424,9 @@ static void nv_drain_rx(struct net_device *dev)
1387 int i; 1424 int i;
1388 for (i = 0; i < np->rx_ring_size; i++) { 1425 for (i = 0; i < np->rx_ring_size; i++) {
1389 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1426 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1390 np->rx_ring.orig[i].FlagLen = 0; 1427 np->rx_ring.orig[i].flaglen = 0;
1391 else 1428 else
1392 np->rx_ring.ex[i].FlagLen = 0; 1429 np->rx_ring.ex[i].flaglen = 0;
1393 wmb(); 1430 wmb();
1394 if (np->rx_skbuff[i]) { 1431 if (np->rx_skbuff[i]) {
1395 pci_unmap_single(np->pci_dev, np->rx_dma[i], 1432 pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -1450,17 +1487,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1450 np->tx_dma_len[nr] = bcnt; 1487 np->tx_dma_len[nr] = bcnt;
1451 1488
1452 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1489 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1453 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1490 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
1454 np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); 1491 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1455 } else { 1492 } else {
1456 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1493 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1457 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1494 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1458 np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); 1495 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1459 } 1496 }
1460 tx_flags = np->tx_flags; 1497 tx_flags = np->tx_flags;
1461 offset += bcnt; 1498 offset += bcnt;
1462 size -= bcnt; 1499 size -= bcnt;
1463 } while(size); 1500 } while (size);
1464 1501
1465 /* setup the fragments */ 1502 /* setup the fragments */
1466 for (i = 0; i < fragments; i++) { 1503 for (i = 0; i < fragments; i++) {
@@ -1477,12 +1514,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1477 np->tx_dma_len[nr] = bcnt; 1514 np->tx_dma_len[nr] = bcnt;
1478 1515
1479 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1516 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1480 np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1517 np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
1481 np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); 1518 np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1482 } else { 1519 } else {
1483 np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1520 np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1484 np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1521 np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1485 np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags); 1522 np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
1486 } 1523 }
1487 offset += bcnt; 1524 offset += bcnt;
1488 size -= bcnt; 1525 size -= bcnt;
@@ -1491,9 +1528,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1491 1528
1492 /* set last fragment flag */ 1529 /* set last fragment flag */
1493 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1530 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1494 np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra); 1531 np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
1495 } else { 1532 } else {
1496 np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra); 1533 np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
1497 } 1534 }
1498 1535
1499 np->tx_skbuff[nr] = skb; 1536 np->tx_skbuff[nr] = skb;
@@ -1512,10 +1549,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1512 1549
1513 /* set tx flags */ 1550 /* set tx flags */
1514 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1551 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1515 np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); 1552 np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1516 } else { 1553 } else {
1517 np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan); 1554 np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
1518 np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra); 1555 np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
1519 } 1556 }
1520 1557
1521 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n", 1558 dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
@@ -1547,7 +1584,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1547static void nv_tx_done(struct net_device *dev) 1584static void nv_tx_done(struct net_device *dev)
1548{ 1585{
1549 struct fe_priv *np = netdev_priv(dev); 1586 struct fe_priv *np = netdev_priv(dev);
1550 u32 Flags; 1587 u32 flags;
1551 unsigned int i; 1588 unsigned int i;
1552 struct sk_buff *skb; 1589 struct sk_buff *skb;
1553 1590
@@ -1555,22 +1592,22 @@ static void nv_tx_done(struct net_device *dev)
1555 i = np->nic_tx % np->tx_ring_size; 1592 i = np->nic_tx % np->tx_ring_size;
1556 1593
1557 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1594 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
1558 Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen); 1595 flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
1559 else 1596 else
1560 Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen); 1597 flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
1561 1598
1562 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n", 1599 dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
1563 dev->name, np->nic_tx, Flags); 1600 dev->name, np->nic_tx, flags);
1564 if (Flags & NV_TX_VALID) 1601 if (flags & NV_TX_VALID)
1565 break; 1602 break;
1566 if (np->desc_ver == DESC_VER_1) { 1603 if (np->desc_ver == DESC_VER_1) {
1567 if (Flags & NV_TX_LASTPACKET) { 1604 if (flags & NV_TX_LASTPACKET) {
1568 skb = np->tx_skbuff[i]; 1605 skb = np->tx_skbuff[i];
1569 if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| 1606 if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
1570 NV_TX_UNDERFLOW|NV_TX_ERROR)) { 1607 NV_TX_UNDERFLOW|NV_TX_ERROR)) {
1571 if (Flags & NV_TX_UNDERFLOW) 1608 if (flags & NV_TX_UNDERFLOW)
1572 np->stats.tx_fifo_errors++; 1609 np->stats.tx_fifo_errors++;
1573 if (Flags & NV_TX_CARRIERLOST) 1610 if (flags & NV_TX_CARRIERLOST)
1574 np->stats.tx_carrier_errors++; 1611 np->stats.tx_carrier_errors++;
1575 np->stats.tx_errors++; 1612 np->stats.tx_errors++;
1576 } else { 1613 } else {
@@ -1579,13 +1616,13 @@ static void nv_tx_done(struct net_device *dev)
1579 } 1616 }
1580 } 1617 }
1581 } else { 1618 } else {
1582 if (Flags & NV_TX2_LASTPACKET) { 1619 if (flags & NV_TX2_LASTPACKET) {
1583 skb = np->tx_skbuff[i]; 1620 skb = np->tx_skbuff[i];
1584 if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| 1621 if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
1585 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { 1622 NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
1586 if (Flags & NV_TX2_UNDERFLOW) 1623 if (flags & NV_TX2_UNDERFLOW)
1587 np->stats.tx_fifo_errors++; 1624 np->stats.tx_fifo_errors++;
1588 if (Flags & NV_TX2_CARRIERLOST) 1625 if (flags & NV_TX2_CARRIERLOST)
1589 np->stats.tx_carrier_errors++; 1626 np->stats.tx_carrier_errors++;
1590 np->stats.tx_errors++; 1627 np->stats.tx_errors++;
1591 } else { 1628 } else {
@@ -1638,29 +1675,29 @@ static void nv_tx_timeout(struct net_device *dev)
1638 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1675 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1639 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 1676 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
1640 i, 1677 i,
1641 le32_to_cpu(np->tx_ring.orig[i].PacketBuffer), 1678 le32_to_cpu(np->tx_ring.orig[i].buf),
1642 le32_to_cpu(np->tx_ring.orig[i].FlagLen), 1679 le32_to_cpu(np->tx_ring.orig[i].flaglen),
1643 le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer), 1680 le32_to_cpu(np->tx_ring.orig[i+1].buf),
1644 le32_to_cpu(np->tx_ring.orig[i+1].FlagLen), 1681 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
1645 le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer), 1682 le32_to_cpu(np->tx_ring.orig[i+2].buf),
1646 le32_to_cpu(np->tx_ring.orig[i+2].FlagLen), 1683 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
1647 le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer), 1684 le32_to_cpu(np->tx_ring.orig[i+3].buf),
1648 le32_to_cpu(np->tx_ring.orig[i+3].FlagLen)); 1685 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
1649 } else { 1686 } else {
1650 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 1687 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
1651 i, 1688 i,
1652 le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh), 1689 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
1653 le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow), 1690 le32_to_cpu(np->tx_ring.ex[i].buflow),
1654 le32_to_cpu(np->tx_ring.ex[i].FlagLen), 1691 le32_to_cpu(np->tx_ring.ex[i].flaglen),
1655 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh), 1692 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
1656 le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow), 1693 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
1657 le32_to_cpu(np->tx_ring.ex[i+1].FlagLen), 1694 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
1658 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh), 1695 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
1659 le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow), 1696 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
1660 le32_to_cpu(np->tx_ring.ex[i+2].FlagLen), 1697 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
1661 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh), 1698 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
1662 le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow), 1699 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
1663 le32_to_cpu(np->tx_ring.ex[i+3].FlagLen)); 1700 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
1664 } 1701 }
1665 } 1702 }
1666 } 1703 }
@@ -1697,7 +1734,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
1697 int protolen; /* length as stored in the proto field */ 1734 int protolen; /* length as stored in the proto field */
1698 1735
1699 /* 1) calculate len according to header */ 1736 /* 1) calculate len according to header */
1700 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) { 1737 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
1701 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 1738 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
1702 hdrlen = VLAN_HLEN; 1739 hdrlen = VLAN_HLEN;
1703 } else { 1740 } else {
@@ -1740,13 +1777,14 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
1740 } 1777 }
1741} 1778}
1742 1779
1743static void nv_rx_process(struct net_device *dev) 1780static int nv_rx_process(struct net_device *dev, int limit)
1744{ 1781{
1745 struct fe_priv *np = netdev_priv(dev); 1782 struct fe_priv *np = netdev_priv(dev);
1746 u32 Flags; 1783 u32 flags;
1747 u32 vlanflags = 0; 1784 u32 vlanflags = 0;
1785 int count;
1748 1786
1749 for (;;) { 1787 for (count = 0; count < limit; ++count) {
1750 struct sk_buff *skb; 1788 struct sk_buff *skb;
1751 int len; 1789 int len;
1752 int i; 1790 int i;
@@ -1755,18 +1793,18 @@ static void nv_rx_process(struct net_device *dev)
1755 1793
1756 i = np->cur_rx % np->rx_ring_size; 1794 i = np->cur_rx % np->rx_ring_size;
1757 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1795 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1758 Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen); 1796 flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
1759 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver); 1797 len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
1760 } else { 1798 } else {
1761 Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen); 1799 flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
1762 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver); 1800 len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
1763 vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow); 1801 vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
1764 } 1802 }
1765 1803
1766 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n", 1804 dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
1767 dev->name, np->cur_rx, Flags); 1805 dev->name, np->cur_rx, flags);
1768 1806
1769 if (Flags & NV_RX_AVAIL) 1807 if (flags & NV_RX_AVAIL)
1770 break; /* still owned by hardware, */ 1808 break; /* still owned by hardware, */
1771 1809
1772 /* 1810 /*
@@ -1780,7 +1818,7 @@ static void nv_rx_process(struct net_device *dev)
1780 1818
1781 { 1819 {
1782 int j; 1820 int j;
1783 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags); 1821 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
1784 for (j=0; j<64; j++) { 1822 for (j=0; j<64; j++) {
1785 if ((j%16) == 0) 1823 if ((j%16) == 0)
1786 dprintk("\n%03x:", j); 1824 dprintk("\n%03x:", j);
@@ -1790,30 +1828,30 @@ static void nv_rx_process(struct net_device *dev)
1790 } 1828 }
1791 /* look at what we actually got: */ 1829 /* look at what we actually got: */
1792 if (np->desc_ver == DESC_VER_1) { 1830 if (np->desc_ver == DESC_VER_1) {
1793 if (!(Flags & NV_RX_DESCRIPTORVALID)) 1831 if (!(flags & NV_RX_DESCRIPTORVALID))
1794 goto next_pkt; 1832 goto next_pkt;
1795 1833
1796 if (Flags & NV_RX_ERROR) { 1834 if (flags & NV_RX_ERROR) {
1797 if (Flags & NV_RX_MISSEDFRAME) { 1835 if (flags & NV_RX_MISSEDFRAME) {
1798 np->stats.rx_missed_errors++; 1836 np->stats.rx_missed_errors++;
1799 np->stats.rx_errors++; 1837 np->stats.rx_errors++;
1800 goto next_pkt; 1838 goto next_pkt;
1801 } 1839 }
1802 if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) { 1840 if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
1803 np->stats.rx_errors++; 1841 np->stats.rx_errors++;
1804 goto next_pkt; 1842 goto next_pkt;
1805 } 1843 }
1806 if (Flags & NV_RX_CRCERR) { 1844 if (flags & NV_RX_CRCERR) {
1807 np->stats.rx_crc_errors++; 1845 np->stats.rx_crc_errors++;
1808 np->stats.rx_errors++; 1846 np->stats.rx_errors++;
1809 goto next_pkt; 1847 goto next_pkt;
1810 } 1848 }
1811 if (Flags & NV_RX_OVERFLOW) { 1849 if (flags & NV_RX_OVERFLOW) {
1812 np->stats.rx_over_errors++; 1850 np->stats.rx_over_errors++;
1813 np->stats.rx_errors++; 1851 np->stats.rx_errors++;
1814 goto next_pkt; 1852 goto next_pkt;
1815 } 1853 }
1816 if (Flags & NV_RX_ERROR4) { 1854 if (flags & NV_RX_ERROR4) {
1817 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 1855 len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
1818 if (len < 0) { 1856 if (len < 0) {
1819 np->stats.rx_errors++; 1857 np->stats.rx_errors++;
@@ -1821,32 +1859,32 @@ static void nv_rx_process(struct net_device *dev)
1821 } 1859 }
1822 } 1860 }
1823 /* framing errors are soft errors. */ 1861 /* framing errors are soft errors. */
1824 if (Flags & NV_RX_FRAMINGERR) { 1862 if (flags & NV_RX_FRAMINGERR) {
1825 if (Flags & NV_RX_SUBSTRACT1) { 1863 if (flags & NV_RX_SUBSTRACT1) {
1826 len--; 1864 len--;
1827 } 1865 }
1828 } 1866 }
1829 } 1867 }
1830 } else { 1868 } else {
1831 if (!(Flags & NV_RX2_DESCRIPTORVALID)) 1869 if (!(flags & NV_RX2_DESCRIPTORVALID))
1832 goto next_pkt; 1870 goto next_pkt;
1833 1871
1834 if (Flags & NV_RX2_ERROR) { 1872 if (flags & NV_RX2_ERROR) {
1835 if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) { 1873 if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
1836 np->stats.rx_errors++; 1874 np->stats.rx_errors++;
1837 goto next_pkt; 1875 goto next_pkt;
1838 } 1876 }
1839 if (Flags & NV_RX2_CRCERR) { 1877 if (flags & NV_RX2_CRCERR) {
1840 np->stats.rx_crc_errors++; 1878 np->stats.rx_crc_errors++;
1841 np->stats.rx_errors++; 1879 np->stats.rx_errors++;
1842 goto next_pkt; 1880 goto next_pkt;
1843 } 1881 }
1844 if (Flags & NV_RX2_OVERFLOW) { 1882 if (flags & NV_RX2_OVERFLOW) {
1845 np->stats.rx_over_errors++; 1883 np->stats.rx_over_errors++;
1846 np->stats.rx_errors++; 1884 np->stats.rx_errors++;
1847 goto next_pkt; 1885 goto next_pkt;
1848 } 1886 }
1849 if (Flags & NV_RX2_ERROR4) { 1887 if (flags & NV_RX2_ERROR4) {
1850 len = nv_getlen(dev, np->rx_skbuff[i]->data, len); 1888 len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
1851 if (len < 0) { 1889 if (len < 0) {
1852 np->stats.rx_errors++; 1890 np->stats.rx_errors++;
@@ -1854,17 +1892,17 @@ static void nv_rx_process(struct net_device *dev)
1854 } 1892 }
1855 } 1893 }
1856 /* framing errors are soft errors */ 1894 /* framing errors are soft errors */
1857 if (Flags & NV_RX2_FRAMINGERR) { 1895 if (flags & NV_RX2_FRAMINGERR) {
1858 if (Flags & NV_RX2_SUBSTRACT1) { 1896 if (flags & NV_RX2_SUBSTRACT1) {
1859 len--; 1897 len--;
1860 } 1898 }
1861 } 1899 }
1862 } 1900 }
1863 if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) { 1901 if (np->rx_csum) {
1864 Flags &= NV_RX2_CHECKSUMMASK; 1902 flags &= NV_RX2_CHECKSUMMASK;
1865 if (Flags == NV_RX2_CHECKSUMOK1 || 1903 if (flags == NV_RX2_CHECKSUMOK1 ||
1866 Flags == NV_RX2_CHECKSUMOK2 || 1904 flags == NV_RX2_CHECKSUMOK2 ||
1867 Flags == NV_RX2_CHECKSUMOK3) { 1905 flags == NV_RX2_CHECKSUMOK3) {
1868 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name); 1906 dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
1869 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY; 1907 np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
1870 } else { 1908 } else {
@@ -1880,17 +1918,27 @@ static void nv_rx_process(struct net_device *dev)
1880 skb->protocol = eth_type_trans(skb, dev); 1918 skb->protocol = eth_type_trans(skb, dev);
1881 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n", 1919 dprintk(KERN_DEBUG "%s: nv_rx_process: packet %d with %d bytes, proto %d accepted.\n",
1882 dev->name, np->cur_rx, len, skb->protocol); 1920 dev->name, np->cur_rx, len, skb->protocol);
1883 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT)) { 1921#ifdef CONFIG_FORCEDETH_NAPI
1884 vlan_hwaccel_rx(skb, np->vlangrp, vlanflags & NV_RX3_VLAN_TAG_MASK); 1922 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
1885 } else { 1923 vlan_hwaccel_receive_skb(skb, np->vlangrp,
1924 vlanflags & NV_RX3_VLAN_TAG_MASK);
1925 else
1926 netif_receive_skb(skb);
1927#else
1928 if (np->vlangrp && (vlanflags & NV_RX3_VLAN_TAG_PRESENT))
1929 vlan_hwaccel_rx(skb, np->vlangrp,
1930 vlanflags & NV_RX3_VLAN_TAG_MASK);
1931 else
1886 netif_rx(skb); 1932 netif_rx(skb);
1887 } 1933#endif
1888 dev->last_rx = jiffies; 1934 dev->last_rx = jiffies;
1889 np->stats.rx_packets++; 1935 np->stats.rx_packets++;
1890 np->stats.rx_bytes += len; 1936 np->stats.rx_bytes += len;
1891next_pkt: 1937next_pkt:
1892 np->cur_rx++; 1938 np->cur_rx++;
1893 } 1939 }
1940
1941 return count;
1894} 1942}
1895 1943
1896static void set_bufsize(struct net_device *dev) 1944static void set_bufsize(struct net_device *dev)
@@ -1990,7 +2038,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
1990 struct fe_priv *np = netdev_priv(dev); 2038 struct fe_priv *np = netdev_priv(dev);
1991 struct sockaddr *macaddr = (struct sockaddr*)addr; 2039 struct sockaddr *macaddr = (struct sockaddr*)addr;
1992 2040
1993 if(!is_valid_ether_addr(macaddr->sa_data)) 2041 if (!is_valid_ether_addr(macaddr->sa_data))
1994 return -EADDRNOTAVAIL; 2042 return -EADDRNOTAVAIL;
1995 2043
1996 /* synchronized against open : rtnl_lock() held by caller */ 2044 /* synchronized against open : rtnl_lock() held by caller */
@@ -2032,7 +2080,6 @@ static void nv_set_multicast(struct net_device *dev)
2032 memset(mask, 0, sizeof(mask)); 2080 memset(mask, 0, sizeof(mask));
2033 2081
2034 if (dev->flags & IFF_PROMISC) { 2082 if (dev->flags & IFF_PROMISC) {
2035 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
2036 pff |= NVREG_PFF_PROMISC; 2083 pff |= NVREG_PFF_PROMISC;
2037 } else { 2084 } else {
2038 pff |= NVREG_PFF_MYADDR; 2085 pff |= NVREG_PFF_MYADDR;
@@ -2283,20 +2330,20 @@ set_speed:
2283 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 2330 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
2284 2331
2285 switch (adv_pause) { 2332 switch (adv_pause) {
2286 case (ADVERTISE_PAUSE_CAP): 2333 case ADVERTISE_PAUSE_CAP:
2287 if (lpa_pause & LPA_PAUSE_CAP) { 2334 if (lpa_pause & LPA_PAUSE_CAP) {
2288 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2335 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
2289 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 2336 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
2290 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2337 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2291 } 2338 }
2292 break; 2339 break;
2293 case (ADVERTISE_PAUSE_ASYM): 2340 case ADVERTISE_PAUSE_ASYM:
2294 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 2341 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
2295 { 2342 {
2296 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2343 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
2297 } 2344 }
2298 break; 2345 break;
2299 case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM): 2346 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
2300 if (lpa_pause & LPA_PAUSE_CAP) 2347 if (lpa_pause & LPA_PAUSE_CAP)
2301 { 2348 {
2302 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 2349 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
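
The cleanup above drops the redundant parentheses from the case labels; the logic itself is the standard 802.3x pause resolution, matching the locally advertised ADVERTISE_PAUSE_CAP/ASYM bits against the partner's LPA_PAUSE bits. The same decision table as a free-standing sketch; the EX_PAUSE_* flags are placeholders:

/* Sketch: 802.3x flow-control resolution for full duplex, from
 * the local advertisement and link-partner ability registers. */
#include <linux/mii.h>

#define EX_PAUSE_RX     0x1     /* we may receive pause frames */
#define EX_PAUSE_TX     0x2     /* we may send pause frames */

static int ex_resolve_pause(u16 adv, u16 lpa)
{
        if ((adv & ADVERTISE_PAUSE_CAP) && (lpa & LPA_PAUSE_CAP))
                return EX_PAUSE_RX | EX_PAUSE_TX;       /* symmetric */

        if ((adv & ADVERTISE_PAUSE_ASYM) && (lpa & LPA_PAUSE_ASYM)) {
                if (adv & ADVERTISE_PAUSE_CAP)
                        return EX_PAUSE_RX;     /* partner sends, we honor */
                if (lpa & LPA_PAUSE_CAP)
                        return EX_PAUSE_TX;     /* we send, partner honors */
        }
        return 0;
}
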
@@ -2376,14 +2423,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
2376 nv_tx_done(dev); 2423 nv_tx_done(dev);
2377 spin_unlock(&np->lock); 2424 spin_unlock(&np->lock);
2378 2425
2379 nv_rx_process(dev);
2380 if (nv_alloc_rx(dev)) {
2381 spin_lock(&np->lock);
2382 if (!np->in_shutdown)
2383 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2384 spin_unlock(&np->lock);
2385 }
2386
2387 if (events & NVREG_IRQ_LINK) { 2426 if (events & NVREG_IRQ_LINK) {
2388 spin_lock(&np->lock); 2427 spin_lock(&np->lock);
2389 nv_link_irq(dev); 2428 nv_link_irq(dev);
@@ -2403,6 +2442,29 @@ static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs)
2403 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n", 2442 printk(KERN_DEBUG "%s: received irq with unknown events 0x%x. Please report\n",
2404 dev->name, events); 2443 dev->name, events);
2405 } 2444 }
2445#ifdef CONFIG_FORCEDETH_NAPI
2446 if (events & NVREG_IRQ_RX_ALL) {
2447 netif_rx_schedule(dev);
2448
 2449		/* Disable further receive irqs */
2450 spin_lock(&np->lock);
2451 np->irqmask &= ~NVREG_IRQ_RX_ALL;
2452
2453 if (np->msi_flags & NV_MSI_X_ENABLED)
2454 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2455 else
2456 writel(np->irqmask, base + NvRegIrqMask);
2457 spin_unlock(&np->lock);
2458 }
2459#else
2460 nv_rx_process(dev, dev->weight);
2461 if (nv_alloc_rx(dev)) {
2462 spin_lock(&np->lock);
2463 if (!np->in_shutdown)
2464 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2465 spin_unlock(&np->lock);
2466 }
2467#endif
2406 if (i > max_interrupt_work) { 2468 if (i > max_interrupt_work) {
2407 spin_lock(&np->lock); 2469 spin_lock(&np->lock);
2408 /* disable interrupts on the nic */ 2470 /* disable interrupts on the nic */
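
With CONFIG_FORCEDETH_NAPI the interrupt handler above no longer drains the receive ring inline: it acknowledges and masks the rx interrupt sources, then hands the work to the poll routine. The interrupt-side half of the old (pre-napi_struct) contract, sketched with placeholder register helpers (ex_*):

/* Sketch: interrupt-side NAPI hand-off, 2.6.18-era API. The ex_*
 * helpers are stand-ins for the driver's mmio accessors. */
#include <linux/netdevice.h>
#include <linux/interrupt.h>

#define EX_IRQ_RX_ALL 0x0007                            /* placeholder */
extern u32 ex_irq_status(struct net_device *dev);       /* stand-ins */
extern void ex_irq_ack(struct net_device *dev, u32 events);
extern void ex_mask_rx_irqs(struct net_device *dev);

static irqreturn_t ex_irq(int irq, void *data, struct pt_regs *regs)
{
        struct net_device *dev = data;
        u32 events = ex_irq_status(dev) & EX_IRQ_RX_ALL;

        ex_irq_ack(dev, events);
        if (events) {
                ex_mask_rx_irqs(dev);   /* no rx irqs until poll finishes */
                netif_rx_schedule(dev); /* queue dev on the poll list */
        }
        return IRQ_HANDLED;
}
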
@@ -2474,6 +2536,63 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
2474 return IRQ_RETVAL(i); 2536 return IRQ_RETVAL(i);
2475} 2537}
2476 2538
2539#ifdef CONFIG_FORCEDETH_NAPI
2540static int nv_napi_poll(struct net_device *dev, int *budget)
2541{
2542 int pkts, limit = min(*budget, dev->quota);
2543 struct fe_priv *np = netdev_priv(dev);
2544 u8 __iomem *base = get_hwbase(dev);
2545
2546 pkts = nv_rx_process(dev, limit);
2547
2548 if (nv_alloc_rx(dev)) {
2549 spin_lock_irq(&np->lock);
2550 if (!np->in_shutdown)
2551 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2552 spin_unlock_irq(&np->lock);
2553 }
2554
2555 if (pkts < limit) {
2556 /* all done, no more packets present */
2557 netif_rx_complete(dev);
2558
2559 /* re-enable receive interrupts */
2560 spin_lock_irq(&np->lock);
2561 np->irqmask |= NVREG_IRQ_RX_ALL;
2562 if (np->msi_flags & NV_MSI_X_ENABLED)
2563 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2564 else
2565 writel(np->irqmask, base + NvRegIrqMask);
2566 spin_unlock_irq(&np->lock);
2567 return 0;
2568 } else {
2569 /* used up our quantum, so reschedule */
2570 dev->quota -= pkts;
2571 *budget -= pkts;
2572 return 1;
2573 }
2574}
2575#endif
2576
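
nv_napi_poll() above implements the matching poll side on the old netdev-based interface: process at most min(*budget, dev->quota) packets, then either complete (leave the poll list and re-enable rx interrupts) or report that work remains. The same happens when the oom timer fires with NAPI enabled: nv_do_rx_refill() simply reschedules the poll so the refill runs in poll context. The contract in miniature, with ex_rx_process() and ex_unmask_rx_irqs() as stand-ins:

/* Sketch: poll-side NAPI, 2.6.18-era dev->poll() semantics. */
#include <linux/netdevice.h>
#include <linux/kernel.h>

extern int ex_rx_process(struct net_device *dev, int limit); /* stand-ins */
extern void ex_unmask_rx_irqs(struct net_device *dev);

static int ex_poll(struct net_device *dev, int *budget)
{
        int limit = min(*budget, dev->quota);
        int pkts = ex_rx_process(dev, limit);   /* bounded rx work */

        dev->quota -= pkts;
        *budget -= pkts;

        if (pkts < limit) {
                netif_rx_complete(dev); /* ring drained: off the poll list */
                ex_unmask_rx_irqs(dev); /* interrupts drive us again */
                return 0;
        }
        return 1;                       /* quota used up: stay scheduled */
}
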
2577#ifdef CONFIG_FORCEDETH_NAPI
2578static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
2579{
2580 struct net_device *dev = (struct net_device *) data;
2581 u8 __iomem *base = get_hwbase(dev);
2582 u32 events;
2583
2584 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
2585 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
2586
2587 if (events) {
2588 netif_rx_schedule(dev);
2589 /* disable receive interrupts on the nic */
2590 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
2591 pci_push(base);
2592 }
2593 return IRQ_HANDLED;
2594}
2595#else
2477static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs) 2596static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
2478{ 2597{
2479 struct net_device *dev = (struct net_device *) data; 2598 struct net_device *dev = (struct net_device *) data;
@@ -2492,7 +2611,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
2492 if (!(events & np->irqmask)) 2611 if (!(events & np->irqmask))
2493 break; 2612 break;
2494 2613
2495 nv_rx_process(dev); 2614 nv_rx_process(dev, dev->weight);
2496 if (nv_alloc_rx(dev)) { 2615 if (nv_alloc_rx(dev)) {
2497 spin_lock_irq(&np->lock); 2616 spin_lock_irq(&np->lock);
2498 if (!np->in_shutdown) 2617 if (!np->in_shutdown)
@@ -2514,12 +2633,12 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
2514 spin_unlock_irq(&np->lock); 2633 spin_unlock_irq(&np->lock);
2515 break; 2634 break;
2516 } 2635 }
2517
2518 } 2636 }
2519 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name); 2637 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
2520 2638
2521 return IRQ_RETVAL(i); 2639 return IRQ_RETVAL(i);
2522} 2640}
2641#endif
2523 2642
2524static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs) 2643static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
2525{ 2644{
@@ -3057,9 +3176,18 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3057 if (netif_running(dev)) 3176 if (netif_running(dev))
3058 printk(KERN_INFO "%s: link down.\n", dev->name); 3177 printk(KERN_INFO "%s: link down.\n", dev->name);
3059 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3178 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3060 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3179 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3061 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3180 bmcr |= BMCR_ANENABLE;
3062 3181 /* reset the phy in order for settings to stick,
3182 * and cause autoneg to start */
3183 if (phy_reset(dev, bmcr)) {
3184 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3185 return -EINVAL;
3186 }
3187 } else {
3188 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3189 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3190 }
3063 } else { 3191 } else {
3064 int adv, bmcr; 3192 int adv, bmcr;
3065 3193
@@ -3099,17 +3227,19 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3099 bmcr |= BMCR_FULLDPLX; 3227 bmcr |= BMCR_FULLDPLX;
3100 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL)) 3228 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
3101 bmcr |= BMCR_SPEED100; 3229 bmcr |= BMCR_SPEED100;
3102 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3103 if (np->phy_oui == PHY_OUI_MARVELL) { 3230 if (np->phy_oui == PHY_OUI_MARVELL) {
3104 /* reset the phy */ 3231 /* reset the phy in order for forced mode settings to stick */
3105 if (phy_reset(dev)) { 3232 if (phy_reset(dev, bmcr)) {
3106 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 3233 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3107 return -EINVAL; 3234 return -EINVAL;
3108 } 3235 }
3109 } else if (netif_running(dev)) { 3236 } else {
3110 /* Wait a bit and then reconfigure the nic. */ 3237 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3111 udelay(10); 3238 if (netif_running(dev)) {
3112 nv_linkchange(dev); 3239 /* Wait a bit and then reconfigure the nic. */
3240 udelay(10);
3241 nv_linkchange(dev);
3242 }
3113 } 3243 }
3114 } 3244 }
3115 3245
@@ -3166,8 +3296,17 @@ static int nv_nway_reset(struct net_device *dev)
3166 } 3296 }
3167 3297
3168 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 3298 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
3169 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 3299 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
3170 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 3300 bmcr |= BMCR_ANENABLE;
 3301		/* reset the phy in order for settings to stick */
3302 if (phy_reset(dev, bmcr)) {
3303 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
3304 return -EINVAL;
3305 }
3306 } else {
3307 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
3308 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
3309 }
3171 3310
3172 if (netif_running(dev)) { 3311 if (netif_running(dev)) {
3173 nv_start_rx(dev); 3312 nv_start_rx(dev);
@@ -3245,7 +3384,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
3245 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) { 3384 if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
3246 /* fall back to old rings */ 3385 /* fall back to old rings */
3247 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3386 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3248 if(rxtx_ring) 3387 if (rxtx_ring)
3249 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 3388 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
3250 rxtx_ring, ring_addr); 3389 rxtx_ring, ring_addr);
3251 } else { 3390 } else {
@@ -3418,7 +3557,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
3418static u32 nv_get_rx_csum(struct net_device *dev) 3557static u32 nv_get_rx_csum(struct net_device *dev)
3419{ 3558{
3420 struct fe_priv *np = netdev_priv(dev); 3559 struct fe_priv *np = netdev_priv(dev);
3421 return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0; 3560 return (np->rx_csum) != 0;
3422} 3561}
3423 3562
3424static int nv_set_rx_csum(struct net_device *dev, u32 data) 3563static int nv_set_rx_csum(struct net_device *dev, u32 data)
@@ -3428,22 +3567,15 @@ static int nv_set_rx_csum(struct net_device *dev, u32 data)
3428 int retcode = 0; 3567 int retcode = 0;
3429 3568
3430 if (np->driver_data & DEV_HAS_CHECKSUM) { 3569 if (np->driver_data & DEV_HAS_CHECKSUM) {
3431
3432 if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
3433 (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
3434 /* already set or unset */
3435 return 0;
3436 }
3437
3438 if (data) { 3570 if (data) {
3571 np->rx_csum = 1;
3439 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 3572 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
3440 } else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
3441 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
3442 } else { 3573 } else {
3443 printk(KERN_INFO "Can not disable rx checksum if vlan is enabled\n"); 3574 np->rx_csum = 0;
3444 return -EINVAL; 3575 /* vlan is dependent on rx checksum offload */
3576 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
3577 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
3445 } 3578 }
3446
3447 if (netif_running(dev)) { 3579 if (netif_running(dev)) {
3448 spin_lock_irq(&np->lock); 3580 spin_lock_irq(&np->lock);
3449 writel(np->txrxctl_bits, base + NvRegTxRxControl); 3581 writel(np->txrxctl_bits, base + NvRegTxRxControl);
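
nv_set_rx_csum() now keeps the user's choice in np->rx_csum instead of reading it back from the NVREG_TXRXCTL_RXCHECK bit. The hardware checksum engine must stay enabled while vlan acceleration is on, so the bit can lag the toggle; the receive path (the np->rx_csum test in nv_rx_process above) honors the software flag. The shape of that decoupling, with hypothetical names (ex_priv, EX_*):

/* Sketch: keep the user-visible offload toggle separate from the
 * hardware enable bit when another feature depends on the engine. */
struct ex_priv {
        int rx_csum;            /* what the rx path honors */
        unsigned int hw_bits;   /* mirrors the control register */
};

#define EX_RXCHECK      0x0400  /* placeholder bit values */
#define EX_VLAN_ENABLE  0x2000

extern void ex_write_ctl(struct ex_priv *np);   /* stand-in */

static void ex_set_rx_csum(struct ex_priv *np, int on)
{
        np->rx_csum = on;
        if (on)
                np->hw_bits |= EX_RXCHECK;
        else if (!(np->hw_bits & EX_VLAN_ENABLE))
                np->hw_bits &= ~EX_RXCHECK;     /* off only if vlan is off */
        ex_write_ctl(np);
}
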
@@ -3481,7 +3613,7 @@ static int nv_get_stats_count(struct net_device *dev)
3481 struct fe_priv *np = netdev_priv(dev); 3613 struct fe_priv *np = netdev_priv(dev);
3482 3614
3483 if (np->driver_data & DEV_HAS_STATISTICS) 3615 if (np->driver_data & DEV_HAS_STATISTICS)
3484 return (sizeof(struct nv_ethtool_stats)/sizeof(u64)); 3616 return sizeof(struct nv_ethtool_stats)/sizeof(u64);
3485 else 3617 else
3486 return 0; 3618 return 0;
3487} 3619}
@@ -3619,7 +3751,7 @@ static int nv_loopback_test(struct net_device *dev)
3619 struct sk_buff *tx_skb, *rx_skb; 3751 struct sk_buff *tx_skb, *rx_skb;
3620 dma_addr_t test_dma_addr; 3752 dma_addr_t test_dma_addr;
3621 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 3753 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
3622 u32 Flags; 3754 u32 flags;
3623 int len, i, pkt_len; 3755 int len, i, pkt_len;
3624 u8 *pkt_data; 3756 u8 *pkt_data;
3625 u32 filter_flags = 0; 3757 u32 filter_flags = 0;
@@ -3663,12 +3795,12 @@ static int nv_loopback_test(struct net_device *dev)
3663 tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE); 3795 tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
3664 3796
3665 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3797 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3666 np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr); 3798 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
3667 np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 3799 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
3668 } else { 3800 } else {
3669 np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32; 3801 np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
3670 np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF; 3802 np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
3671 np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 3803 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
3672 } 3804 }
3673 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3805 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3674 pci_push(get_hwbase(dev)); 3806 pci_push(get_hwbase(dev));
@@ -3677,21 +3809,21 @@ static int nv_loopback_test(struct net_device *dev)
3677 3809
3678 /* check for rx of the packet */ 3810 /* check for rx of the packet */
3679 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3811 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
3680 Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen); 3812 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
3681 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 3813 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
3682 3814
3683 } else { 3815 } else {
3684 Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen); 3816 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
3685 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver); 3817 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
3686 } 3818 }
3687 3819
3688 if (Flags & NV_RX_AVAIL) { 3820 if (flags & NV_RX_AVAIL) {
3689 ret = 0; 3821 ret = 0;
3690 } else if (np->desc_ver == DESC_VER_1) { 3822 } else if (np->desc_ver == DESC_VER_1) {
3691 if (Flags & NV_RX_ERROR) 3823 if (flags & NV_RX_ERROR)
3692 ret = 0; 3824 ret = 0;
3693 } else { 3825 } else {
3694 if (Flags & NV_RX2_ERROR) { 3826 if (flags & NV_RX2_ERROR) {
3695 ret = 0; 3827 ret = 0;
3696 } 3828 }
3697 } 3829 }
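For the extended (64-bit) descriptor format, the loopback test splits the test buffer's DMA address into 32-bit high/low words. A compilable sketch of the same split, with plain stdint types standing in for dma_addr_t and the ring descriptor; the cpu_to_le32() conversion the driver applies is left out for brevity:

    #include <stdint.h>

    struct ring_desc_ex {        /* stand-in for the driver's ex descriptor */
        uint32_t bufhigh;
        uint32_t buflow;
        uint32_t flaglen;
    };

    /* Split a 64-bit bus address exactly as the hunk above does:
     * upper 32 bits into bufhigh, lower 32 bits into buflow. */
    static void fill_desc(struct ring_desc_ex *d, uint64_t dma, uint32_t flaglen)
    {
        d->bufhigh = (uint32_t)(dma >> 32);
        d->buflow  = (uint32_t)(dma & 0xffffffffu);
        d->flaglen = flaglen;
    }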
@@ -3753,6 +3885,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
3753 if (test->flags & ETH_TEST_FL_OFFLINE) { 3885 if (test->flags & ETH_TEST_FL_OFFLINE) {
3754 if (netif_running(dev)) { 3886 if (netif_running(dev)) {
3755 netif_stop_queue(dev); 3887 netif_stop_queue(dev);
3888 netif_poll_disable(dev);
3756 netif_tx_lock_bh(dev); 3889 netif_tx_lock_bh(dev);
3757 spin_lock_irq(&np->lock); 3890 spin_lock_irq(&np->lock);
3758 nv_disable_hw_interrupts(dev, np->irqmask); 3891 nv_disable_hw_interrupts(dev, np->irqmask);
@@ -3811,6 +3944,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
3811 nv_start_rx(dev); 3944 nv_start_rx(dev);
3812 nv_start_tx(dev); 3945 nv_start_tx(dev);
3813 netif_start_queue(dev); 3946 netif_start_queue(dev);
3947 netif_poll_enable(dev);
3814 nv_enable_hw_interrupts(dev, np->irqmask); 3948 nv_enable_hw_interrupts(dev, np->irqmask);
3815 } 3949 }
3816 } 3950 }
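The added netif_poll_disable()/netif_poll_enable() calls bracket the offline test so the per-device poll routine of this kernel generation cannot run while the rings are torn down. A kernel-side sketch of the bracketing pattern; do_offline_test() is a hypothetical placeholder for the body of nv_self_test():

    #include <linux/netdevice.h>

    static void do_offline_test(struct net_device *dev);   /* hypothetical */

    /* Quiesce tx and the per-device poller, run the test, re-arm both. */
    static void run_offline_test(struct net_device *dev)
    {
        netif_stop_queue(dev);      /* no new tx submissions        */
        netif_poll_disable(dev);    /* wait out any in-flight poll  */

        do_offline_test(dev);       /* touch rings/registers here   */

        netif_start_queue(dev);     /* resume tx                    */
        netif_poll_enable(dev);     /* re-arm polling               */
    }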
@@ -3895,10 +4029,9 @@ static int nv_open(struct net_device *dev)
3895 4029
3896 dprintk(KERN_DEBUG "nv_open: begin\n"); 4030 dprintk(KERN_DEBUG "nv_open: begin\n");
3897 4031
3898 /* 1) erase previous misconfiguration */ 4032 /* erase previous misconfiguration */
3899 if (np->driver_data & DEV_HAS_POWER_CNTRL) 4033 if (np->driver_data & DEV_HAS_POWER_CNTRL)
3900 nv_mac_reset(dev); 4034 nv_mac_reset(dev);
3901 /* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
3902 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); 4035 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
3903 writel(0, base + NvRegMulticastAddrB); 4036 writel(0, base + NvRegMulticastAddrB);
3904 writel(0, base + NvRegMulticastMaskA); 4037 writel(0, base + NvRegMulticastMaskA);
@@ -3913,26 +4046,22 @@ static int nv_open(struct net_device *dev)
3913 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) 4046 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
3914 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame); 4047 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3915 4048
3916 /* 2) initialize descriptor rings */ 4049 /* initialize descriptor rings */
3917 set_bufsize(dev); 4050 set_bufsize(dev);
3918 oom = nv_init_ring(dev); 4051 oom = nv_init_ring(dev);
3919 4052
3920 writel(0, base + NvRegLinkSpeed); 4053 writel(0, base + NvRegLinkSpeed);
3921 writel(0, base + NvRegUnknownTransmitterReg); 4054 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
3922 nv_txrx_reset(dev); 4055 nv_txrx_reset(dev);
3923 writel(0, base + NvRegUnknownSetupReg6); 4056 writel(0, base + NvRegUnknownSetupReg6);
3924 4057
3925 np->in_shutdown = 0; 4058 np->in_shutdown = 0;
3926 4059
3927 /* 3) set mac address */ 4060 /* give hw rings */
3928 nv_copy_mac_to_hw(dev);
3929
3930 /* 4) give hw rings */
3931 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4061 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3932 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4062 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3933 base + NvRegRingSizes); 4063 base + NvRegRingSizes);
3934 4064
3935 /* 5) continue setup */
3936 writel(np->linkspeed, base + NvRegLinkSpeed); 4065 writel(np->linkspeed, base + NvRegLinkSpeed);
3937 if (np->desc_ver == DESC_VER_1) 4066 if (np->desc_ver == DESC_VER_1)
3938 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark); 4067 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
@@ -3950,7 +4079,6 @@ static int nv_open(struct net_device *dev)
3950 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4079 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3951 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); 4080 writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
3952 4081
3953 /* 6) continue setup */
3954 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1); 4082 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
3955 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus); 4083 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
3956 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags); 4084 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
@@ -4020,6 +4148,8 @@ static int nv_open(struct net_device *dev)
4020 nv_start_rx(dev); 4148 nv_start_rx(dev);
4021 nv_start_tx(dev); 4149 nv_start_tx(dev);
4022 netif_start_queue(dev); 4150 netif_start_queue(dev);
4151 netif_poll_enable(dev);
4152
4023 if (ret) { 4153 if (ret) {
4024 netif_carrier_on(dev); 4154 netif_carrier_on(dev);
4025 } else { 4155 } else {
@@ -4049,6 +4179,7 @@ static int nv_close(struct net_device *dev)
4049 spin_lock_irq(&np->lock); 4179 spin_lock_irq(&np->lock);
4050 np->in_shutdown = 1; 4180 np->in_shutdown = 1;
4051 spin_unlock_irq(&np->lock); 4181 spin_unlock_irq(&np->lock);
4182 netif_poll_disable(dev);
4052 synchronize_irq(dev->irq); 4183 synchronize_irq(dev->irq);
4053 4184
4054 del_timer_sync(&np->oom_kick); 4185 del_timer_sync(&np->oom_kick);
@@ -4076,12 +4207,6 @@ static int nv_close(struct net_device *dev)
4076 if (np->wolenabled) 4207 if (np->wolenabled)
4077 nv_start_rx(dev); 4208 nv_start_rx(dev);
4078 4209
4079 /* special op: write back the misordered MAC address - otherwise
4080 * the next nv_probe would see a wrong address.
4081 */
4082 writel(np->orig_mac[0], base + NvRegMacAddrA);
4083 writel(np->orig_mac[1], base + NvRegMacAddrB);
4084
4085 /* FIXME: power down nic */ 4210 /* FIXME: power down nic */
4086 4211
4087 return 0; 4212 return 0;
@@ -4094,7 +4219,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4094 unsigned long addr; 4219 unsigned long addr;
4095 u8 __iomem *base; 4220 u8 __iomem *base;
4096 int err, i; 4221 int err, i;
4097 u32 powerstate; 4222 u32 powerstate, txreg;
4098 4223
4099 dev = alloc_etherdev(sizeof(struct fe_priv)); 4224 dev = alloc_etherdev(sizeof(struct fe_priv));
4100 err = -ENOMEM; 4225 err = -ENOMEM;
@@ -4190,6 +4315,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4190 np->pkt_limit = NV_PKTLIMIT_2; 4315 np->pkt_limit = NV_PKTLIMIT_2;
4191 4316
4192 if (id->driver_data & DEV_HAS_CHECKSUM) { 4317 if (id->driver_data & DEV_HAS_CHECKSUM) {
4318 np->rx_csum = 1;
4193 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 4319 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4194 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 4320 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
4195#ifdef NETIF_F_TSO 4321#ifdef NETIF_F_TSO
@@ -4270,6 +4396,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4270#ifdef CONFIG_NET_POLL_CONTROLLER 4396#ifdef CONFIG_NET_POLL_CONTROLLER
4271 dev->poll_controller = nv_poll_controller; 4397 dev->poll_controller = nv_poll_controller;
4272#endif 4398#endif
4399 dev->weight = 64;
4400#ifdef CONFIG_FORCEDETH_NAPI
4401 dev->poll = nv_napi_poll;
4402#endif
4273 SET_ETHTOOL_OPS(dev, &ops); 4403 SET_ETHTOOL_OPS(dev, &ops);
4274 dev->tx_timeout = nv_tx_timeout; 4404 dev->tx_timeout = nv_tx_timeout;
4275 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 4405 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
@@ -4281,12 +4411,30 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4281 np->orig_mac[0] = readl(base + NvRegMacAddrA); 4411 np->orig_mac[0] = readl(base + NvRegMacAddrA);
4282 np->orig_mac[1] = readl(base + NvRegMacAddrB); 4412 np->orig_mac[1] = readl(base + NvRegMacAddrB);
4283 4413
4284 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff; 4414 /* check the workaround bit for correct mac address order */
4285 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff; 4415 txreg = readl(base + NvRegTransmitPoll);
4286 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff; 4416 if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
4287 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff; 4417 /* mac address is already in correct order */
4288 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 4418 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
4289 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 4419 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
4420 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
4421 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
4422 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
4423 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
4424 } else {
4425 /* need to reverse mac address to correct order */
4426 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
4427 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
4428 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
4429 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
4430 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
4431 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
 4432 /* set permanent address to be correct as well */
4433 np->orig_mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
4434 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
4435 np->orig_mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
4436 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
4437 }
4290 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 4438 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
4291 4439
4292 if (!is_valid_ether_addr(dev->perm_addr)) { 4440 if (!is_valid_ether_addr(dev->perm_addr)) {
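The probe path now keys on NVREG_TRANSMITPOLL_MAC_ADDR_REV: if the workaround bit is clear, the two MAC registers hold the address byte-swapped and must be reversed. A standalone sketch of just the reversal branch, with host-endian uint32_t values standing in for the orig_mac[] register reads:

    #include <stdint.h>

    /* Decode a byte-swapped MAC from the two 32-bit address registers,
     * mirroring the "need to reverse" branch in the hunk above. */
    static void mac_from_swapped_regs(uint8_t mac[6], uint32_t rega, uint32_t regb)
    {
        mac[0] = (regb >>  8) & 0xff;
        mac[1] = (regb >>  0) & 0xff;
        mac[2] = (rega >> 24) & 0xff;
        mac[3] = (rega >> 16) & 0xff;
        mac[4] = (rega >>  8) & 0xff;
        mac[5] = (rega >>  0) & 0xff;
    }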
@@ -4309,6 +4457,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4309 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 4457 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
4310 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 4458 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
4311 4459
4460 /* set mac address */
4461 nv_copy_mac_to_hw(dev);
4462
4312 /* disable WOL */ 4463 /* disable WOL */
4313 writel(0, base + NvRegWakeUpFlags); 4464 writel(0, base + NvRegWakeUpFlags);
4314 np->wolenabled = 0; 4465 np->wolenabled = 0;
@@ -4369,6 +4520,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
4369 if (id2 < 0 || id2 == 0xffff) 4520 if (id2 < 0 || id2 == 0xffff)
4370 continue; 4521 continue;
4371 4522
4523 np->phy_model = id2 & PHYID2_MODEL_MASK;
4372 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 4524 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
4373 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 4525 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
4374 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n", 4526 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
@@ -4421,9 +4573,17 @@ out:
4421static void __devexit nv_remove(struct pci_dev *pci_dev) 4573static void __devexit nv_remove(struct pci_dev *pci_dev)
4422{ 4574{
4423 struct net_device *dev = pci_get_drvdata(pci_dev); 4575 struct net_device *dev = pci_get_drvdata(pci_dev);
4576 struct fe_priv *np = netdev_priv(dev);
4577 u8 __iomem *base = get_hwbase(dev);
4424 4578
4425 unregister_netdev(dev); 4579 unregister_netdev(dev);
4426 4580
4581 /* special op: write back the misordered MAC address - otherwise
4582 * the next nv_probe would see a wrong address.
4583 */
4584 writel(np->orig_mac[0], base + NvRegMacAddrA);
4585 writel(np->orig_mac[1], base + NvRegMacAddrB);
4586
4427 /* free all structures */ 4587 /* free all structures */
4428 free_rings(dev); 4588 free_rings(dev);
4429 iounmap(get_hwbase(dev)); 4589 iounmap(get_hwbase(dev));
@@ -4540,7 +4700,7 @@ static struct pci_driver driver = {
4540static int __init init_nic(void) 4700static int __init init_nic(void)
4541{ 4701{
4542 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); 4702 printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION);
4543 return pci_module_init(&driver); 4703 return pci_register_driver(&driver);
4544} 4704}
4545 4705
4546static void __exit exit_nic(void) 4706static void __exit exit_nic(void)
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
index d6dd3f2fb43e..02d4dc18ba69 100644
--- a/drivers/net/fs_enet/Makefile
+++ b/drivers/net/fs_enet/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_FS_ENET) += fs_enet.o 5obj-$(CONFIG_FS_ENET) += fs_enet.o
6 6
7obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o 7obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o
8obj-$(CONFIG_8260) += mac-fcc.o 8obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o
9 9
10fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o 10fs_enet-objs := fs_enet-main.o
diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h
new file mode 100644
index 000000000000..e980527e2b99
--- /dev/null
+++ b/drivers/net/fs_enet/fec.h
@@ -0,0 +1,42 @@
1#ifndef FS_ENET_FEC_H
2#define FS_ENET_FEC_H
3
 4/* CRC polynomial used by the FEC for the multicast group filtering */
5#define FEC_CRC_POLY 0x04C11DB7
6
7#define FEC_MAX_MULTICAST_ADDRS 64
8
9/* Interrupt events/masks.
10*/
11#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
12#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
13#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
14#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
15#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
16#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
17#define FEC_ENET_RXF 0x02000000U /* Full frame received */
18#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
19#define FEC_ENET_MII 0x00800000U /* MII interrupt */
20#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
21
22#define FEC_ECNTRL_PINMUX 0x00000004
23#define FEC_ECNTRL_ETHER_EN 0x00000002
24#define FEC_ECNTRL_RESET 0x00000001
25
26#define FEC_RCNTRL_BC_REJ 0x00000010
27#define FEC_RCNTRL_PROM 0x00000008
28#define FEC_RCNTRL_MII_MODE 0x00000004
29#define FEC_RCNTRL_DRT 0x00000002
30#define FEC_RCNTRL_LOOP 0x00000001
31
32#define FEC_TCNTRL_FDEN 0x00000004
33#define FEC_TCNTRL_HBC 0x00000002
34#define FEC_TCNTRL_GTS 0x00000001
35
36
37
38/*
39 * Delay to wait for FEC reset command to complete (in us)
40 */
41#define FEC_RESET_DELAY 50
42#endif
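FEC_CRC_POLY above is the standard CRC-32 polynomial the FEC runs over a multicast address before indexing its 64-entry group hash (FEC_MAX_MULTICAST_ADDRS). A self-contained sketch of one plausible MSB-first computation; the exact bit ordering and the reduction to a 6-bit index are assumptions, not taken from this header:

    #include <stdint.h>

    #define FEC_CRC_POLY 0x04C11DB7

    /* MSB-first CRC-32 over the 6-byte MAC, then the top 6 bits as the
     * hash-table index (assumed reduction for a 64-entry table). */
    static unsigned int fec_mc_hash(const uint8_t mac[6])
    {
        uint32_t crc = 0xffffffffu;
        int i, bit;

        for (i = 0; i < 6; i++) {
            crc ^= (uint32_t)mac[i] << 24;
            for (bit = 0; bit < 8; bit++)
                crc = (crc & 0x80000000u)
                    ? (crc << 1) ^ FEC_CRC_POLY
                    : (crc << 1);
        }
        return crc >> 26;       /* 6-bit index, 0..63 */
    }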
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index f6abff5846b3..df62506a1787 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -37,6 +37,7 @@
37#include <linux/bitops.h> 37#include <linux/bitops.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/phy.h>
40 41
41#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
@@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq)
682 (*fep->ops->post_free_irq)(dev, irq); 683 (*fep->ops->post_free_irq)(dev, irq);
683} 684}
684 685
685/**********************************************************************************/
686
687/* This interrupt occurs when the PHY detects a link change. */
688static irqreturn_t
689fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
690{
691 struct net_device *dev = dev_id;
692 struct fs_enet_private *fep;
693 const struct fs_platform_info *fpi;
694
695 fep = netdev_priv(dev);
696 fpi = fep->fpi;
697
698 /*
699 * Acknowledge the interrupt if possible. If we have not
700 * found the PHY yet we can't process or acknowledge the
701 * interrupt now. Instead we ignore this interrupt for now,
702 * which we can do since it is edge triggered. It will be
703 * acknowledged later by fs_enet_open().
704 */
705 if (!fep->phy)
706 return IRQ_NONE;
707
708 fs_mii_ack_int(dev);
709 fs_mii_link_status_change_check(dev, 0);
710
711 return IRQ_HANDLED;
712}
713
714static void fs_timeout(struct net_device *dev) 686static void fs_timeout(struct net_device *dev)
715{ 687{
716 struct fs_enet_private *fep = netdev_priv(dev); 688 struct fs_enet_private *fep = netdev_priv(dev);
@@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev)
722 spin_lock_irqsave(&fep->lock, flags); 694 spin_lock_irqsave(&fep->lock, flags);
723 695
724 if (dev->flags & IFF_UP) { 696 if (dev->flags & IFF_UP) {
697 phy_stop(fep->phydev);
725 (*fep->ops->stop)(dev); 698 (*fep->ops->stop)(dev);
726 (*fep->ops->restart)(dev); 699 (*fep->ops->restart)(dev);
700 phy_start(fep->phydev);
727 } 701 }
728 702
703 phy_start(fep->phydev);
729 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); 704 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
730 spin_unlock_irqrestore(&fep->lock, flags); 705 spin_unlock_irqrestore(&fep->lock, flags);
731 706
@@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev)
733 netif_wake_queue(dev); 708 netif_wake_queue(dev);
734} 709}
735 710
711/*-----------------------------------------------------------------------------
712 * generic link-change handler - should be sufficient for most cases
713 *-----------------------------------------------------------------------------*/
714static void generic_adjust_link(struct net_device *dev)
715{
716 struct fs_enet_private *fep = netdev_priv(dev);
717 struct phy_device *phydev = fep->phydev;
718 int new_state = 0;
719
720 if (phydev->link) {
721
722 /* adjust to duplex mode */
723 if (phydev->duplex != fep->oldduplex){
724 new_state = 1;
725 fep->oldduplex = phydev->duplex;
726 }
727
728 if (phydev->speed != fep->oldspeed) {
729 new_state = 1;
730 fep->oldspeed = phydev->speed;
731 }
732
733 if (!fep->oldlink) {
734 new_state = 1;
735 fep->oldlink = 1;
736 netif_schedule(dev);
737 netif_carrier_on(dev);
738 netif_start_queue(dev);
739 }
740
741 if (new_state)
742 fep->ops->restart(dev);
743
744 } else if (fep->oldlink) {
745 new_state = 1;
746 fep->oldlink = 0;
747 fep->oldspeed = 0;
748 fep->oldduplex = -1;
749 netif_carrier_off(dev);
750 netif_stop_queue(dev);
751 }
752
753 if (new_state && netif_msg_link(fep))
754 phy_print_status(phydev);
755}
756
757
758static void fs_adjust_link(struct net_device *dev)
759{
760 struct fs_enet_private *fep = netdev_priv(dev);
761 unsigned long flags;
762
763 spin_lock_irqsave(&fep->lock, flags);
764
765 if(fep->ops->adjust_link)
766 fep->ops->adjust_link(dev);
767 else
768 generic_adjust_link(dev);
769
770 spin_unlock_irqrestore(&fep->lock, flags);
771}
772
773static int fs_init_phy(struct net_device *dev)
774{
775 struct fs_enet_private *fep = netdev_priv(dev);
776 struct phy_device *phydev;
777
778 fep->oldlink = 0;
779 fep->oldspeed = 0;
780 fep->oldduplex = -1;
781 if(fep->fpi->bus_id)
782 phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0);
783 else {
784 printk("No phy bus ID specified in BSP code\n");
785 return -EINVAL;
786 }
787 if (IS_ERR(phydev)) {
788 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
789 return PTR_ERR(phydev);
790 }
791
792 fep->phydev = phydev;
793
794 return 0;
795}
796
797
736static int fs_enet_open(struct net_device *dev) 798static int fs_enet_open(struct net_device *dev)
737{ 799{
738 struct fs_enet_private *fep = netdev_priv(dev); 800 struct fs_enet_private *fep = netdev_priv(dev);
739 const struct fs_platform_info *fpi = fep->fpi;
740 int r; 801 int r;
802 int err;
741 803
742 /* Install our interrupt handler. */ 804 /* Install our interrupt handler. */
743 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); 805 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
744 if (r != 0) { 806 if (r != 0) {
745 printk(KERN_ERR DRV_MODULE_NAME 807 printk(KERN_ERR DRV_MODULE_NAME
746 ": %s Could not allocate FEC IRQ!", dev->name); 808 ": %s Could not allocate FS_ENET IRQ!", dev->name);
747 return -EINVAL; 809 return -EINVAL;
748 } 810 }
749 811
750 /* Install our phy interrupt handler */ 812 err = fs_init_phy(dev);
751 if (fpi->phy_irq != -1) { 813 if(err)
752 814 return err;
753 r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
754 if (r != 0) {
755 printk(KERN_ERR DRV_MODULE_NAME
756 ": %s Could not allocate PHY IRQ!", dev->name);
757 fs_free_irq(dev, fep->interrupt);
758 return -EINVAL;
759 }
760 }
761 815
762 fs_mii_startup(dev); 816 phy_start(fep->phydev);
763 netif_carrier_off(dev);
764 fs_mii_link_status_change_check(dev, 1);
765 817
766 return 0; 818 return 0;
767} 819}
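The converted open path orders its steps as: claim the MAC interrupt, connect the PHY through phylib, then phy_start(). A hedged sketch of that ordering with hypothetical helper names standing in for the fs_enet internals; note the sketch also releases the irq when the PHY attach fails, a cleanup the hunk above omits:

    struct net_device;

    /* Hypothetical stand-ins for fs_request_irq/fs_free_irq,
     * fs_init_phy (phy_connect + adjust_link) and phy_start. */
    int  request_mac_irq(struct net_device *dev);
    void free_mac_irq(struct net_device *dev);
    int  attach_phy(struct net_device *dev);
    void start_phy(struct net_device *dev);

    static int open_sequence(struct net_device *dev)
    {
        int err;

        err = request_mac_irq(dev);
        if (err)
            return err;

        err = attach_phy(dev);
        if (err) {
            free_mac_irq(dev);  /* release the MAC irq on failure */
            return err;
        }

        start_phy(dev);
        return 0;
    }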
@@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev)
769static int fs_enet_close(struct net_device *dev) 821static int fs_enet_close(struct net_device *dev)
770{ 822{
771 struct fs_enet_private *fep = netdev_priv(dev); 823 struct fs_enet_private *fep = netdev_priv(dev);
772 const struct fs_platform_info *fpi = fep->fpi;
773 unsigned long flags; 824 unsigned long flags;
774 825
775 netif_stop_queue(dev); 826 netif_stop_queue(dev);
776 netif_carrier_off(dev); 827 netif_carrier_off(dev);
777 fs_mii_shutdown(dev); 828 phy_stop(fep->phydev);
778 829
779 spin_lock_irqsave(&fep->lock, flags); 830 spin_lock_irqsave(&fep->lock, flags);
780 (*fep->ops->stop)(dev); 831 (*fep->ops->stop)(dev);
781 spin_unlock_irqrestore(&fep->lock, flags); 832 spin_unlock_irqrestore(&fep->lock, flags);
782 833
783 /* release any irqs */ 834 /* release any irqs */
784 if (fpi->phy_irq != -1) 835 phy_disconnect(fep->phydev);
785 fs_free_irq(dev, fpi->phy_irq); 836 fep->phydev = NULL;
786 fs_free_irq(dev, fep->interrupt); 837 fs_free_irq(dev, fep->interrupt);
787 838
788 return 0; 839 return 0;
@@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
830static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 881static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
831{ 882{
832 struct fs_enet_private *fep = netdev_priv(dev); 883 struct fs_enet_private *fep = netdev_priv(dev);
833 unsigned long flags; 884 return phy_ethtool_gset(fep->phydev, cmd);
834 int rc;
835
836 spin_lock_irqsave(&fep->lock, flags);
837 rc = mii_ethtool_gset(&fep->mii_if, cmd);
838 spin_unlock_irqrestore(&fep->lock, flags);
839
840 return rc;
841} 885}
842 886
843static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 887static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
844{ 888{
845 struct fs_enet_private *fep = netdev_priv(dev); 889 struct fs_enet_private *fep = netdev_priv(dev);
846 unsigned long flags; 890 phy_ethtool_sset(fep->phydev, cmd);
847 int rc; 891 return 0;
848
849 spin_lock_irqsave(&fep->lock, flags);
850 rc = mii_ethtool_sset(&fep->mii_if, cmd);
851 spin_unlock_irqrestore(&fep->lock, flags);
852
853 return rc;
854} 892}
855 893
856static int fs_nway_reset(struct net_device *dev) 894static int fs_nway_reset(struct net_device *dev)
857{ 895{
858 struct fs_enet_private *fep = netdev_priv(dev); 896 return 0;
859 return mii_nway_restart(&fep->mii_if);
860} 897}
861 898
862static u32 fs_get_msglevel(struct net_device *dev) 899static u32 fs_get_msglevel(struct net_device *dev)
@@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
898 return -EINVAL; 935 return -EINVAL;
899 936
900 spin_lock_irqsave(&fep->lock, flags); 937 spin_lock_irqsave(&fep->lock, flags);
901 rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); 938 rc = phy_mii_ioctl(fep->phydev, mii, cmd);
902 spin_unlock_irqrestore(&fep->lock, flags); 939 spin_unlock_irqrestore(&fep->lock, flags);
903 return rc; 940 return rc;
904} 941}
@@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev,
1030 } 1067 }
1031 registered = 1; 1068 registered = 1;
1032 1069
1033 err = fs_mii_connect(ndev);
1034 if (err != 0) {
1035 printk(KERN_ERR DRV_MODULE_NAME
1036 ": %s fs_mii_connect failed.\n", ndev->name);
1037 goto err;
1038 }
1039 1070
1040 return ndev; 1071 return ndev;
1041 1072
@@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev)
1073 1104
1074 fpi = fep->fpi; 1105 fpi = fep->fpi;
1075 1106
1076 fs_mii_disconnect(ndev);
1077
1078 unregister_netdev(ndev); 1107 unregister_netdev(ndev);
1079 1108
1080 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), 1109 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
@@ -1196,17 +1225,39 @@ static int __init fs_init(void)
1196 r = setup_immap(); 1225 r = setup_immap();
1197 if (r != 0) 1226 if (r != 0)
1198 return r; 1227 return r;
1199 r = driver_register(&fs_enet_fec_driver); 1228
1229#ifdef CONFIG_FS_ENET_HAS_FCC
1230 /* let's insert mii stuff */
1231 r = fs_enet_mdio_bb_init();
1232
1233 if (r != 0) {
1234 printk(KERN_ERR DRV_MODULE_NAME
1235 "BB PHY init failed.\n");
1236 return r;
1237 }
1238 r = driver_register(&fs_enet_fcc_driver);
1200 if (r != 0) 1239 if (r != 0)
1201 goto err; 1240 goto err;
1241#endif
1202 1242
1203 r = driver_register(&fs_enet_fcc_driver); 1243#ifdef CONFIG_FS_ENET_HAS_FEC
1244 r = fs_enet_mdio_fec_init();
1245 if (r != 0) {
1246 printk(KERN_ERR DRV_MODULE_NAME
1247 "FEC PHY init failed.\n");
1248 return r;
1249 }
1250
1251 r = driver_register(&fs_enet_fec_driver);
1204 if (r != 0) 1252 if (r != 0)
1205 goto err; 1253 goto err;
1254#endif
1206 1255
1256#ifdef CONFIG_FS_ENET_HAS_SCC
1207 r = driver_register(&fs_enet_scc_driver); 1257 r = driver_register(&fs_enet_scc_driver);
1208 if (r != 0) 1258 if (r != 0)
1209 goto err; 1259 goto err;
1260#endif
1210 1261
1211 return 0; 1262 return 0;
1212err: 1263err:
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
deleted file mode 100644
index b7e6e21725cb..000000000000
--- a/drivers/net/fs_enet/fs_enet-mii.c
+++ /dev/null
@@ -1,505 +0,0 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/ptrace.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/delay.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/spinlock.h>
36#include <linux/mii.h>
37#include <linux/ethtool.h>
38#include <linux/bitops.h>
39
40#include <asm/pgtable.h>
41#include <asm/irq.h>
42#include <asm/uaccess.h>
43
44#include "fs_enet.h"
45
46/*************************************************/
47
48/*
49 * Generic PHY support.
50 * Should work for all PHYs, but link change is detected by polling
51 */
52
53static void generic_timer_callback(unsigned long data)
54{
55 struct net_device *dev = (struct net_device *)data;
56 struct fs_enet_private *fep = netdev_priv(dev);
57
58 fep->phy_timer_list.expires = jiffies + HZ / 2;
59
60 add_timer(&fep->phy_timer_list);
61
62 fs_mii_link_status_change_check(dev, 0);
63}
64
65static void generic_startup(struct net_device *dev)
66{
67 struct fs_enet_private *fep = netdev_priv(dev);
68
69 fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
70 fep->phy_timer_list.data = (unsigned long)dev;
71 fep->phy_timer_list.function = generic_timer_callback;
72 add_timer(&fep->phy_timer_list);
73}
74
75static void generic_shutdown(struct net_device *dev)
76{
77 struct fs_enet_private *fep = netdev_priv(dev);
78
79 del_timer_sync(&fep->phy_timer_list);
80}
81
82/* ------------------------------------------------------------------------- */
83/* The Davicom DM9161 is used on the NETTA board */
84
85/* register definitions */
86
87#define MII_DM9161_ANAR 4 /* Aux. Config Register */
88#define MII_DM9161_ACR 16 /* Aux. Config Register */
89#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
90#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
91#define MII_DM9161_INTR 21 /* Interrupt Register */
92#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
93#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
94
95static void dm9161_startup(struct net_device *dev)
96{
97 struct fs_enet_private *fep = netdev_priv(dev);
98
99 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
100 /* Start autonegotiation */
101 fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
102
103 set_current_state(TASK_UNINTERRUPTIBLE);
104 schedule_timeout(HZ*8);
105}
106
107static void dm9161_ack_int(struct net_device *dev)
108{
109 struct fs_enet_private *fep = netdev_priv(dev);
110
111 fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
112}
113
114static void dm9161_shutdown(struct net_device *dev)
115{
116 struct fs_enet_private *fep = netdev_priv(dev);
117
118 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
119}
120
121/**********************************************************************************/
122
123static const struct phy_info phy_info[] = {
124 {
125 .id = 0x00181b88,
126 .name = "DM9161",
127 .startup = dm9161_startup,
128 .ack_int = dm9161_ack_int,
129 .shutdown = dm9161_shutdown,
130 }, {
131 .id = 0,
132 .name = "GENERIC",
133 .startup = generic_startup,
134 .shutdown = generic_shutdown,
135 },
136};
137
138/**********************************************************************************/
139
140static int phy_id_detect(struct net_device *dev)
141{
142 struct fs_enet_private *fep = netdev_priv(dev);
143 const struct fs_platform_info *fpi = fep->fpi;
144 struct fs_enet_mii_bus *bus = fep->mii_bus;
145 int i, r, start, end, phytype, physubtype;
146 const struct phy_info *phy;
147 int phy_hwid, phy_id;
148
149 phy_hwid = -1;
150 fep->phy = NULL;
151
152 /* auto-detect? */
153 if (fpi->phy_addr == -1) {
154 start = 1;
155 end = 32;
156 } else { /* direct */
157 start = fpi->phy_addr;
158 end = start + 1;
159 }
160
161 for (phy_id = start; phy_id < end; phy_id++) {
162 /* skip already used phy addresses on this bus */
163 if (bus->usage_map & (1 << phy_id))
164 continue;
165 r = fs_mii_read(dev, phy_id, MII_PHYSID1);
166 if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
167 continue;
168 r = fs_mii_read(dev, phy_id, MII_PHYSID2);
169 if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
170 continue;
171 phy_hwid = (phytype << 16) | physubtype;
172 if (phy_hwid != -1)
173 break;
174 }
175
176 if (phy_hwid == -1) {
177 printk(KERN_ERR DRV_MODULE_NAME
178 ": %s No PHY detected! range=0x%02x-0x%02x\n",
179 dev->name, start, end);
180 return -1;
181 }
182
183 for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
184 if (phy->id == (phy_hwid >> 4) || phy->id == 0)
185 break;
186
187 if (i >= ARRAY_SIZE(phy_info)) {
188 printk(KERN_ERR DRV_MODULE_NAME
189 ": %s PHY id 0x%08x is not supported!\n",
190 dev->name, phy_hwid);
191 return -1;
192 }
193
194 fep->phy = phy;
195
196 /* mark this address as used */
197 bus->usage_map |= (1 << phy_id);
198
199 printk(KERN_INFO DRV_MODULE_NAME
200 ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
201 dev->name, phy_id, fep->phy->name, phy_hwid,
202 fpi->phy_addr == -1 ? " (auto-detected)" : "");
203
204 return phy_id;
205}
206
207void fs_mii_startup(struct net_device *dev)
208{
209 struct fs_enet_private *fep = netdev_priv(dev);
210
211 if (fep->phy->startup)
212 (*fep->phy->startup) (dev);
213}
214
215void fs_mii_shutdown(struct net_device *dev)
216{
217 struct fs_enet_private *fep = netdev_priv(dev);
218
219 if (fep->phy->shutdown)
220 (*fep->phy->shutdown) (dev);
221}
222
223void fs_mii_ack_int(struct net_device *dev)
224{
225 struct fs_enet_private *fep = netdev_priv(dev);
226
227 if (fep->phy->ack_int)
228 (*fep->phy->ack_int) (dev);
229}
230
231#define MII_LINK 0x0001
232#define MII_HALF 0x0002
233#define MII_FULL 0x0004
234#define MII_BASE4 0x0008
235#define MII_10M 0x0010
236#define MII_100M 0x0020
237#define MII_1G 0x0040
238#define MII_10G 0x0080
239
240/* return full mii info at one gulp, with a usable form */
241static unsigned int mii_full_status(struct mii_if_info *mii)
242{
243 unsigned int status;
244 int bmsr, adv, lpa, neg;
245 struct fs_enet_private* fep = netdev_priv(mii->dev);
246
247 /* first, a dummy read, needed to latch some MII phys */
248 (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
249 bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
250
251 /* no link */
252 if ((bmsr & BMSR_LSTATUS) == 0)
253 return 0;
254
255 status = MII_LINK;
256
257 /* Lets look what ANEG says if it's supported - otherwize we shall
258 take the right values from the platform info*/
259 if(!mii->force_media) {
260 /* autoneg not completed; don't bother */
261 if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
262 return 0;
263
264 adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
265 lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
266
267 neg = lpa & adv;
268 } else {
269 neg = fep->fpi->bus_info->lpa;
270 }
271
272 if (neg & LPA_100FULL)
273 status |= MII_FULL | MII_100M;
274 else if (neg & LPA_100BASE4)
275 status |= MII_FULL | MII_BASE4 | MII_100M;
276 else if (neg & LPA_100HALF)
277 status |= MII_HALF | MII_100M;
278 else if (neg & LPA_10FULL)
279 status |= MII_FULL | MII_10M;
280 else
281 status |= MII_HALF | MII_10M;
282
283 return status;
284}
285
286void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
287{
288 struct fs_enet_private *fep = netdev_priv(dev);
289 struct mii_if_info *mii = &fep->mii_if;
290 unsigned int mii_status;
291 int ok_to_print, link, duplex, speed;
292 unsigned long flags;
293
294 ok_to_print = netif_msg_link(fep);
295
296 mii_status = mii_full_status(mii);
297
298 if (!init_media && mii_status == fep->last_mii_status)
299 return;
300
301 fep->last_mii_status = mii_status;
302
303 link = !!(mii_status & MII_LINK);
304 duplex = !!(mii_status & MII_FULL);
305 speed = (mii_status & MII_100M) ? 100 : 10;
306
307 if (link == 0) {
308 netif_carrier_off(mii->dev);
309 netif_stop_queue(dev);
310 if (!init_media) {
311 spin_lock_irqsave(&fep->lock, flags);
312 (*fep->ops->stop)(dev);
313 spin_unlock_irqrestore(&fep->lock, flags);
314 }
315
316 if (ok_to_print)
317 printk(KERN_INFO "%s: link down\n", mii->dev->name);
318
319 } else {
320
321 mii->full_duplex = duplex;
322
323 netif_carrier_on(mii->dev);
324
325 spin_lock_irqsave(&fep->lock, flags);
326 fep->duplex = duplex;
327 fep->speed = speed;
328 (*fep->ops->restart)(dev);
329 spin_unlock_irqrestore(&fep->lock, flags);
330
331 netif_start_queue(dev);
332
333 if (ok_to_print)
334 printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
335 dev->name, speed, duplex ? "full" : "half");
336 }
337}
338
339/**********************************************************************************/
340
341int fs_mii_read(struct net_device *dev, int phy_id, int location)
342{
343 struct fs_enet_private *fep = netdev_priv(dev);
344 struct fs_enet_mii_bus *bus = fep->mii_bus;
345
346 unsigned long flags;
347 int ret;
348
349 spin_lock_irqsave(&bus->mii_lock, flags);
350 ret = (*bus->mii_read)(bus, phy_id, location);
351 spin_unlock_irqrestore(&bus->mii_lock, flags);
352
353 return ret;
354}
355
356void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
357{
358 struct fs_enet_private *fep = netdev_priv(dev);
359 struct fs_enet_mii_bus *bus = fep->mii_bus;
360 unsigned long flags;
361
362 spin_lock_irqsave(&bus->mii_lock, flags);
363 (*bus->mii_write)(bus, phy_id, location, value);
364 spin_unlock_irqrestore(&bus->mii_lock, flags);
365}
366
367/*****************************************************************************/
368
369/* list of all registered mii buses */
370static LIST_HEAD(fs_mii_bus_list);
371
372static struct fs_enet_mii_bus *lookup_bus(int method, int id)
373{
374 struct list_head *ptr;
375 struct fs_enet_mii_bus *bus;
376
377 list_for_each(ptr, &fs_mii_bus_list) {
378 bus = list_entry(ptr, struct fs_enet_mii_bus, list);
379 if (bus->bus_info->method == method &&
380 bus->bus_info->id == id)
381 return bus;
382 }
383 return NULL;
384}
385
386static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
387{
388 struct fs_enet_mii_bus *bus;
389 int ret = 0;
390
391 bus = kmalloc(sizeof(*bus), GFP_KERNEL);
392 if (bus == NULL) {
393 ret = -ENOMEM;
394 goto err;
395 }
396 memset(bus, 0, sizeof(*bus));
397 spin_lock_init(&bus->mii_lock);
398 bus->bus_info = bi;
399 bus->refs = 0;
400 bus->usage_map = 0;
401
402 /* perform initialization */
403 switch (bi->method) {
404
405 case fsmii_fixed:
406 ret = fs_mii_fixed_init(bus);
407 if (ret != 0)
408 goto err;
409 break;
410
411 case fsmii_bitbang:
412 ret = fs_mii_bitbang_init(bus);
413 if (ret != 0)
414 goto err;
415 break;
416#ifdef CONFIG_FS_ENET_HAS_FEC
417 case fsmii_fec:
418 ret = fs_mii_fec_init(bus);
419 if (ret != 0)
420 goto err;
421 break;
422#endif
423 default:
424 ret = -EINVAL;
425 goto err;
426 }
427
428 list_add(&bus->list, &fs_mii_bus_list);
429
430 return bus;
431
432err:
433 kfree(bus);
434 return ERR_PTR(ret);
435}
436
437static void destroy_bus(struct fs_enet_mii_bus *bus)
438{
439 /* remove from bus list */
440 list_del(&bus->list);
441
442 /* nothing more needed */
443 kfree(bus);
444}
445
446int fs_mii_connect(struct net_device *dev)
447{
448 struct fs_enet_private *fep = netdev_priv(dev);
449 const struct fs_platform_info *fpi = fep->fpi;
450 struct fs_enet_mii_bus *bus = NULL;
451
452 /* check method validity */
453 switch (fpi->bus_info->method) {
454 case fsmii_fixed:
455 case fsmii_bitbang:
456 break;
457#ifdef CONFIG_FS_ENET_HAS_FEC
458 case fsmii_fec:
459 break;
460#endif
461 default:
462 printk(KERN_ERR DRV_MODULE_NAME
463 ": %s Unknown MII bus method (%d)!\n",
464 dev->name, fpi->bus_info->method);
465 return -EINVAL;
466 }
467
468 bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
469
470 /* if not found create new bus */
471 if (bus == NULL) {
472 bus = create_bus(fpi->bus_info);
473 if (IS_ERR(bus)) {
474 printk(KERN_ERR DRV_MODULE_NAME
475 ": %s MII bus creation failure!\n", dev->name);
476 return PTR_ERR(bus);
477 }
478 }
479
480 bus->refs++;
481
482 fep->mii_bus = bus;
483
484 fep->mii_if.dev = dev;
485 fep->mii_if.phy_id_mask = 0x1f;
486 fep->mii_if.reg_num_mask = 0x1f;
487 fep->mii_if.mdio_read = fs_mii_read;
488 fep->mii_if.mdio_write = fs_mii_write;
489 fep->mii_if.force_media = fpi->bus_info->disable_aneg;
490 fep->mii_if.phy_id = phy_id_detect(dev);
491
492 return 0;
493}
494
495void fs_mii_disconnect(struct net_device *dev)
496{
497 struct fs_enet_private *fep = netdev_priv(dev);
498 struct fs_enet_mii_bus *bus = NULL;
499
500 bus = fep->mii_bus;
501 fep->mii_bus = NULL;
502
503 if (--bus->refs <= 0)
504 destroy_bus(bus);
505}
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index e7ec96c964a9..95022c005f75 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -5,6 +5,7 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/list.h> 7#include <linux/list.h>
8#include <linux/phy.h>
8 9
9#include <linux/fs_enet_pd.h> 10#include <linux/fs_enet_pd.h>
10 11
@@ -12,12 +13,30 @@
12 13
13#ifdef CONFIG_CPM1 14#ifdef CONFIG_CPM1
14#include <asm/commproc.h> 15#include <asm/commproc.h>
16
17struct fec_info {
18 fec_t* fecp;
19 u32 mii_speed;
20};
15#endif 21#endif
16 22
17#ifdef CONFIG_CPM2 23#ifdef CONFIG_CPM2
18#include <asm/cpm2.h> 24#include <asm/cpm2.h>
19#endif 25#endif
20 26
 27/* This is used to operate on the pins. 
 28 Note that the actual port size may 
 29 differ; the CPM(s) handle it OK */ 
30struct bb_info {
31 u8 mdio_dat_msk;
32 u8 mdio_dir_msk;
33 u8 *mdio_dir;
34 u8 *mdio_dat;
35 u8 mdc_msk;
36 u8 *mdc_dat;
37 int delay;
38};
39
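The bb_info masks above drive bit-banged MDIO through byte-wide read-modify-write on the port registers. A small standalone sketch of one such pin write; the struct and helper names are illustrative, not from the patch:

    #include <stdint.h>

    struct bb_pin {
        volatile uint8_t *dat;  /* port data register byte */
        uint8_t msk;            /* bit mask within it      */
    };

    /* Drive a bit-banged MDIO pin high or low, the same
     * read-modify-write the bb_info masks make possible. */
    static void bb_set(struct bb_pin *p, int val)
    {
        if (val)
            *p->dat |= p->msk;
        else
            *p->dat &= (uint8_t)~p->msk;
    }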
21/* hw driver ops */ 40/* hw driver ops */
22struct fs_ops { 41struct fs_ops {
23 int (*setup_data)(struct net_device *dev); 42 int (*setup_data)(struct net_device *dev);
@@ -25,6 +44,7 @@ struct fs_ops {
25 void (*free_bd)(struct net_device *dev); 44 void (*free_bd)(struct net_device *dev);
26 void (*cleanup_data)(struct net_device *dev); 45 void (*cleanup_data)(struct net_device *dev);
27 void (*set_multicast_list)(struct net_device *dev); 46 void (*set_multicast_list)(struct net_device *dev);
47 void (*adjust_link)(struct net_device *dev);
28 void (*restart)(struct net_device *dev); 48 void (*restart)(struct net_device *dev);
29 void (*stop)(struct net_device *dev); 49 void (*stop)(struct net_device *dev);
30 void (*pre_request_irq)(struct net_device *dev, int irq); 50 void (*pre_request_irq)(struct net_device *dev, int irq);
@@ -100,10 +120,6 @@ struct fs_enet_mii_bus {
100 }; 120 };
101}; 121};
102 122
103int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
104int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
105int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
106
107struct fs_enet_private { 123struct fs_enet_private {
108 struct device *dev; /* pointer back to the device (must be initialized first) */ 124 struct device *dev; /* pointer back to the device (must be initialized first) */
109 spinlock_t lock; /* during all ops except TX pckt processing */ 125 spinlock_t lock; /* during all ops except TX pckt processing */
@@ -130,7 +146,8 @@ struct fs_enet_private {
130 struct fs_enet_mii_bus *mii_bus; 146 struct fs_enet_mii_bus *mii_bus;
131 int interrupt; 147 int interrupt;
132 148
133 int duplex, speed; /* current settings */ 149 struct phy_device *phydev;
150 int oldduplex, oldspeed, oldlink; /* current settings */
134 151
135 /* event masks */ 152 /* event masks */
136 u32 ev_napi_rx; /* mask of NAPI rx events */ 153 u32 ev_napi_rx; /* mask of NAPI rx events */
@@ -168,15 +185,9 @@ struct fs_enet_private {
168}; 185};
169 186
170/***************************************************************************/ 187/***************************************************************************/
171 188int fs_enet_mdio_bb_init(void);
172int fs_mii_read(struct net_device *dev, int phy_id, int location); 189int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
173void fs_mii_write(struct net_device *dev, int phy_id, int location, int value); 190int fs_enet_mdio_fec_init(void);
174
175void fs_mii_startup(struct net_device *dev);
176void fs_mii_shutdown(struct net_device *dev);
177void fs_mii_ack_int(struct net_device *dev);
178
179void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
180 191
181void fs_init_bds(struct net_device *dev); 192void fs_init_bds(struct net_device *dev);
182void fs_cleanup_bds(struct net_device *dev); 193void fs_cleanup_bds(struct net_device *dev);
@@ -194,7 +205,6 @@ int fs_enet_platform_init(void);
194void fs_enet_platform_cleanup(void); 205void fs_enet_platform_cleanup(void);
195 206
196/***************************************************************************/ 207/***************************************************************************/
197
198/* buffer descriptor access macros */ 208/* buffer descriptor access macros */
199 209
200/* access macros */ 210/* access macros */
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 64e20982c1fe..1ff2597b8495 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -34,6 +34,7 @@
34#include <linux/bitops.h> 34#include <linux/bitops.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/phy.h>
37 38
38#include <asm/immap_cpm2.h> 39#include <asm/immap_cpm2.h>
39#include <asm/mpc8260.h> 40#include <asm/mpc8260.h>
@@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep)
122 123
123 /* Attach the memory for the FCC Parameter RAM */ 124 /* Attach the memory for the FCC Parameter RAM */
124 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); 125 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
125 fep->fcc.ep = (void *)r->start; 126 fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1);
126
127 if (fep->fcc.ep == NULL) 127 if (fep->fcc.ep == NULL)
128 return -EINVAL; 128 return -EINVAL;
129 129
130 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); 130 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
131 fep->fcc.fccp = (void *)r->start; 131 fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1);
132
133 if (fep->fcc.fccp == NULL) 132 if (fep->fcc.fccp == NULL)
134 return -EINVAL; 133 return -EINVAL;
135 134
136 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; 135 if (fep->fpi->fcc_regs_c) {
136
137 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
138 } else {
139 r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
140 "fcc_regs_c");
141 fep->fcc.fcccp = (void *)ioremap(r->start,
142 r->end - r->start + 1);
143 }
137 144
138 if (fep->fcc.fcccp == NULL) 145 if (fep->fcc.fcccp == NULL)
139 return -EINVAL; 146 return -EINVAL;
140 147
148 fep->fcc.mem = (void *)fep->fpi->mem_offset;
149 if (fep->fcc.mem == NULL)
150 return -EINVAL;
151
141 return 0; 152 return 0;
142} 153}
143 154
@@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev)
155 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ 166 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
156 return -EINVAL; 167 return -EINVAL;
157 168
158 fep->fcc.mem = (void *)fpi->mem_offset;
159
160 if (do_pd_setup(fep) != 0) 169 if (do_pd_setup(fep) != 0)
161 return -EINVAL; 170 return -EINVAL;
162 171
@@ -394,7 +403,7 @@ static void restart(struct net_device *dev)
394 403
395 /* adjust to speed (for RMII mode) */ 404 /* adjust to speed (for RMII mode) */
396 if (fpi->use_rmii) { 405 if (fpi->use_rmii) {
397 if (fep->speed == 100) 406 if (fep->phydev->speed == 100)
398 C8(fcccp, fcc_gfemr, 0x20); 407 C8(fcccp, fcc_gfemr, 0x20);
399 else 408 else
400 S8(fcccp, fcc_gfemr, 0x20); 409 S8(fcccp, fcc_gfemr, 0x20);
@@ -420,7 +429,7 @@ static void restart(struct net_device *dev)
420 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); 429 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
421 430
422 /* adjust to duplex mode */ 431 /* adjust to duplex mode */
423 if (fep->duplex) 432 if (fep->phydev->duplex)
424 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); 433 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
425 else 434 else
426 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); 435 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
@@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev)
486 495
487static void tx_kickstart(struct net_device *dev) 496static void tx_kickstart(struct net_device *dev)
488{ 497{
489 /* nothing */ 498 struct fs_enet_private *fep = netdev_priv(dev);
499 fcc_t *fccp = fep->fcc.fccp;
500
501 S32(fccp, fcc_ftodr, 0x80);
490} 502}
491 503
492static u32 get_int_events(struct net_device *dev) 504static u32 get_int_events(struct net_device *dev)
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index e09547077529..c2c5fd419bd0 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -46,6 +46,7 @@
46#endif 46#endif
47 47
48#include "fs_enet.h" 48#include "fs_enet.h"
49#include "fec.h"
49 50
50/*************************************************/ 51/*************************************************/
51 52
@@ -75,50 +76,8 @@
75/* clear bits */ 76/* clear bits */
76#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) 77#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
77 78
78
79/* CRC polynomium used by the FEC for the multicast group filtering */
80#define FEC_CRC_POLY 0x04C11DB7
81
82#define FEC_MAX_MULTICAST_ADDRS 64
83
84/* Interrupt events/masks.
85*/
86#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
87#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
88#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
89#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
90#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
91#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
92#define FEC_ENET_RXF 0x02000000U /* Full frame received */
93#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
94#define FEC_ENET_MII 0x00800000U /* MII interrupt */
95#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
96
97#define FEC_ECNTRL_PINMUX 0x00000004
98#define FEC_ECNTRL_ETHER_EN 0x00000002
99#define FEC_ECNTRL_RESET 0x00000001
100
101#define FEC_RCNTRL_BC_REJ 0x00000010
102#define FEC_RCNTRL_PROM 0x00000008
103#define FEC_RCNTRL_MII_MODE 0x00000004
104#define FEC_RCNTRL_DRT 0x00000002
105#define FEC_RCNTRL_LOOP 0x00000001
106
107#define FEC_TCNTRL_FDEN 0x00000004
108#define FEC_TCNTRL_HBC 0x00000002
109#define FEC_TCNTRL_GTS 0x00000001
110
111
112/* Make MII read/write commands for the FEC.
113*/
114#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
115#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
116#define mk_mii_end 0
117
118#define FEC_MII_LOOPS 10000
119
120/* 79/*
121 * Delay to wait for FEC reset command to complete (in us) 80 * Delay to wait for FEC reset command to complete (in us)
122 */ 81 */
123#define FEC_RESET_DELAY 50 82#define FEC_RESET_DELAY 50
124 83
@@ -303,13 +262,15 @@ static void restart(struct net_device *dev)
303 int r; 262 int r;
304 u32 addrhi, addrlo; 263 u32 addrhi, addrlo;
305 264
265 struct mii_bus* mii = fep->phydev->bus;
266 struct fec_info* fec_inf = mii->priv;
267
306 r = whack_reset(fep->fec.fecp); 268 r = whack_reset(fep->fec.fecp);
307 if (r != 0) 269 if (r != 0)
308 printk(KERN_ERR DRV_MODULE_NAME 270 printk(KERN_ERR DRV_MODULE_NAME
309 ": %s FEC Reset FAILED!\n", dev->name); 271 ": %s FEC Reset FAILED!\n", dev->name);
310
311 /* 272 /*
312 * Set station address. 273 * Set station address.
313 */ 274 */
314 addrhi = ((u32) dev->dev_addr[0] << 24) | 275 addrhi = ((u32) dev->dev_addr[0] << 24) |
315 ((u32) dev->dev_addr[1] << 16) | 276 ((u32) dev->dev_addr[1] << 16) |
@@ -350,12 +311,12 @@ static void restart(struct net_device *dev)
350 FW(fecp, fun_code, 0x78000000); 311 FW(fecp, fun_code, 0x78000000);
351 312
352 /* 313 /*
353 * Set MII speed. 314 * Set MII speed.
354 */ 315 */
355 FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed); 316 FW(fecp, mii_speed, fec_inf->mii_speed);
356 317
357 /* 318 /*
358 * Clear any outstanding interrupt. 319 * Clear any outstanding interrupt.
359 */ 320 */
360 FW(fecp, ievent, 0xffc0); 321 FW(fecp, ievent, 0xffc0);
361 FW(fecp, ivec, (fep->interrupt / 2) << 29); 322 FW(fecp, ivec, (fep->interrupt / 2) << 29);
@@ -390,11 +351,12 @@ static void restart(struct net_device *dev)
390 } 351 }
391#endif 352#endif
392 353
354
393 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 355 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
394 /* 356 /*
395 * adjust to duplex mode 357 * adjust to duplex mode
396 */ 358 */
397 if (fep->duplex) { 359 if (fep->phydev->duplex) {
398 FC(fecp, r_cntrl, FEC_RCNTRL_DRT); 360 FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
399 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ 361 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
400 } else { 362 } else {
@@ -418,9 +380,11 @@ static void restart(struct net_device *dev)
418static void stop(struct net_device *dev) 380static void stop(struct net_device *dev)
419{ 381{
420 struct fs_enet_private *fep = netdev_priv(dev); 382 struct fs_enet_private *fep = netdev_priv(dev);
383 const struct fs_platform_info *fpi = fep->fpi;
421 fec_t *fecp = fep->fec.fecp; 384 fec_t *fecp = fep->fec.fecp;
422 struct fs_enet_mii_bus *bus = fep->mii_bus; 385
423 const struct fs_mii_bus_info *bi = bus->bus_info; 386 struct fec_info* feci= fep->phydev->bus->priv;
387
424 int i; 388 int i;
425 389
426 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) 390 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
@@ -444,11 +408,11 @@ static void stop(struct net_device *dev)
444 fs_cleanup_bds(dev); 408 fs_cleanup_bds(dev);
445 409
446 /* shut down FEC1? that's where the mii bus is */ 410 /* shut down FEC1? that's where the mii bus is */
447 if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) { 411 if (fpi->has_phy) {
448 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 412 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
449 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); 413 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
450 FW(fecp, ievent, FEC_ENET_MII); 414 FW(fecp, ievent, FEC_ENET_MII);
451 FW(fecp, mii_speed, bus->fec.mii_speed); 415 FW(fecp, mii_speed, feci->mii_speed);
452 } 416 }
453} 417}
454 418
@@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = {
583 .free_bd = free_bd, 547 .free_bd = free_bd,
584}; 548};
585 549
586/***********************************************************************/
587
588static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
589{
590 fec_t *fecp = bus->fec.fecp;
591 int i, ret = -1;
592
593 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
594 BUG();
595
596 /* Add PHY address to register command. */
597 FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
598
599 for (i = 0; i < FEC_MII_LOOPS; i++)
600 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
601 break;
602
603 if (i < FEC_MII_LOOPS) {
604 FW(fecp, ievent, FEC_ENET_MII);
605 ret = FR(fecp, mii_data) & 0xffff;
606 }
607
608 return ret;
609}
610
611static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
612{
613 fec_t *fecp = bus->fec.fecp;
614 int i;
615
616 /* this must never happen */
617 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
618 BUG();
619
620 /* Add PHY address to register command. */
621 FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
622
623 for (i = 0; i < FEC_MII_LOOPS; i++)
624 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
625 break;
626
627 if (i < FEC_MII_LOOPS)
628 FW(fecp, ievent, FEC_ENET_MII);
629}
630
631int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
632{
633 bd_t *bd = (bd_t *)__res;
634 const struct fs_mii_bus_info *bi = bus->bus_info;
635 fec_t *fecp;
636
637 if (bi->id != 0)
638 return -1;
639
640 bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
641 bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
642 & 0x3F) << 1;
643
644 fecp = bus->fec.fecp;
645
646 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
647 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
648 FW(fecp, ievent, FEC_ENET_MII);
649 FW(fecp, mii_speed, bus->fec.mii_speed);
650
651 bus->mii_read = mii_read;
652 bus->mii_write = mii_write;
653
654 return 0;
655}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index eaa24fab645f..95ec5872c507 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -369,7 +369,7 @@ static void restart(struct net_device *dev)
369 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); 369 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
370 370
371 /* Set full duplex mode if needed */ 371 /* Set full duplex mode if needed */
372 if (fep->duplex) 372 if (fep->phydev->duplex)
373 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); 373 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
374 374
375 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 375 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev)
500 scc_cr_cmd(fep, CPM_CR_RESTART_TX); 500 scc_cr_cmd(fep, CPM_CR_RESTART_TX);
501} 501}
502 502
503
504
503/*************************************************************************/ 505/*************************************************************************/
504 506
505const struct fs_ops fs_scc_ops = { 507const struct fs_ops fs_scc_ops = {
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 48f9cf83ab6f..0b9b8b5c847c 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -33,6 +33,7 @@
33#include <linux/mii.h> 33#include <linux/mii.h>
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/bitops.h> 35#include <linux/bitops.h>
36#include <linux/platform_device.h>
36 37
37#include <asm/pgtable.h> 38#include <asm/pgtable.h>
38#include <asm/irq.h> 39#include <asm/irq.h>
@@ -40,129 +41,25 @@
40 41
41#include "fs_enet.h" 42#include "fs_enet.h"
42 43
43#ifdef CONFIG_8xx 44static int bitbang_prep_bit(u8 **datp, u8 *mskp,
44static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) 45 struct fs_mii_bit *mii_bit)
45{ 46{
46 immap_t *im = (immap_t *)fs_enet_immap; 47 void *dat;
47 void *dir, *dat, *ppar;
48 int adv; 48 int adv;
49 u8 msk; 49 u8 msk;
50 50
51 switch (port) { 51 dat = (void*) mii_bit->offset;
52 case fsiop_porta:
53 dir = &im->im_ioport.iop_padir;
54 dat = &im->im_ioport.iop_padat;
55 ppar = &im->im_ioport.iop_papar;
56 break;
57
58 case fsiop_portb:
59 dir = &im->im_cpm.cp_pbdir;
60 dat = &im->im_cpm.cp_pbdat;
61 ppar = &im->im_cpm.cp_pbpar;
62 break;
63
64 case fsiop_portc:
65 dir = &im->im_ioport.iop_pcdir;
66 dat = &im->im_ioport.iop_pcdat;
67 ppar = &im->im_ioport.iop_pcpar;
68 break;
69
70 case fsiop_portd:
71 dir = &im->im_ioport.iop_pddir;
72 dat = &im->im_ioport.iop_pddat;
73 ppar = &im->im_ioport.iop_pdpar;
74 break;
75
76 case fsiop_porte:
77 dir = &im->im_cpm.cp_pedir;
78 dat = &im->im_cpm.cp_pedat;
79 ppar = &im->im_cpm.cp_pepar;
80 break;
81
82 default:
83 printk(KERN_ERR DRV_MODULE_NAME
84 "Illegal port value %d!\n", port);
85 return -EINVAL;
86 }
87
88 adv = bit >> 3;
89 dir = (char *)dir + adv;
90 dat = (char *)dat + adv;
91 ppar = (char *)ppar + adv;
92
93 msk = 1 << (7 - (bit & 7));
94 if ((in_8(ppar) & msk) != 0) {
95 printk(KERN_ERR DRV_MODULE_NAME
96 "pin %d on port %d is not general purpose!\n", bit, port);
97 return -EINVAL;
98 }
99
100 *dirp = dir;
101 *datp = dat;
102 *mskp = msk;
103
104 return 0;
105}
106#endif
107
108#ifdef CONFIG_8260
109static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
110{
111 iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
112 void *dir, *dat, *ppar;
113 int adv;
114 u8 msk;
115
116 switch (port) {
117 case fsiop_porta:
118 dir = &io->iop_pdira;
119 dat = &io->iop_pdata;
120 ppar = &io->iop_ppara;
121 break;
122
123 case fsiop_portb:
124 dir = &io->iop_pdirb;
125 dat = &io->iop_pdatb;
126 ppar = &io->iop_pparb;
127 break;
128
129 case fsiop_portc:
130 dir = &io->iop_pdirc;
131 dat = &io->iop_pdatc;
132 ppar = &io->iop_pparc;
133 break;
134
135 case fsiop_portd:
136 dir = &io->iop_pdird;
137 dat = &io->iop_pdatd;
138 ppar = &io->iop_ppard;
139 break;
140
141 default:
142 printk(KERN_ERR DRV_MODULE_NAME
143 "Illegal port value %d!\n", port);
144 return -EINVAL;
145 }
146 52
147 adv = bit >> 3; 53 adv = mii_bit->bit >> 3;
148 dir = (char *)dir + adv;
149 dat = (char *)dat + adv; 54 dat = (char *)dat + adv;
150 ppar = (char *)ppar + adv;
151 55
152 msk = 1 << (7 - (bit & 7)); 56 msk = 1 << (7 - (mii_bit->bit & 7));
153 if ((in_8(ppar) & msk) != 0) {
154 printk(KERN_ERR DRV_MODULE_NAME
155 "pin %d on port %d is not general purpose!\n", bit, port);
156 return -EINVAL;
157 }
158 57
159 *dirp = dir;
160 *datp = dat; 58 *datp = dat;
161 *mskp = msk; 59 *mskp = msk;
162 60
163 return 0; 61 return 0;
164} 62}
165#endif
166 63
167static inline void bb_set(u8 *p, u8 m) 64static inline void bb_set(u8 *p, u8 m)
168{ 65{
@@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m)
179 return (in_8(p) & m) != 0; 76 return (in_8(p) & m) != 0;
180} 77}
181 78
182static inline void mdio_active(struct fs_enet_mii_bus *bus) 79static inline void mdio_active(struct bb_info *bitbang)
183{ 80{
184 bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); 81 bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk);
185} 82}
186 83
187static inline void mdio_tristate(struct fs_enet_mii_bus *bus) 84static inline void mdio_tristate(struct bb_info *bitbang )
188{ 85{
189 bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); 86 bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk);
190} 87}
191 88
192static inline int mdio_read(struct fs_enet_mii_bus *bus) 89static inline int mdio_read(struct bb_info *bitbang )
193{ 90{
194 return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 91 return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk);
195} 92}
196 93
197static inline void mdio(struct fs_enet_mii_bus *bus, int what) 94static inline void mdio(struct bb_info *bitbang , int what)
198{ 95{
199 if (what) 96 if (what)
200 bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 97 bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk);
201 else 98 else
202 bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 99 bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk);
203} 100}
204 101
205static inline void mdc(struct fs_enet_mii_bus *bus, int what) 102static inline void mdc(struct bb_info *bitbang , int what)
206{ 103{
207 if (what) 104 if (what)
208 bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); 105 bb_set(bitbang->mdc_dat, bitbang->mdc_msk);
209 else 106 else
210 bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); 107 bb_clr(bitbang->mdc_dat, bitbang->mdc_msk);
211} 108}
212 109
213static inline void mii_delay(struct fs_enet_mii_bus *bus) 110static inline void mii_delay(struct bb_info *bitbang )
214{ 111{
215 udelay(bus->bus_info->i.bitbang.delay); 112 udelay(bitbang->delay);
216} 113}
217 114
218/* Utility to send the preamble, address, and register (common to read and write). */ 115/* Utility to send the preamble, address, and register (common to read and write). */
219static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) 116static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg)
220{ 117{
221 int j; 118 int j;
222 119
@@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
228 * but it is safer and will be much more robust. 125 * but it is safer and will be much more robust.
229 */ 126 */
230 127
231 mdio_active(bus); 128 mdio_active(bitbang);
232 mdio(bus, 1); 129 mdio(bitbang, 1);
233 for (j = 0; j < 32; j++) { 130 for (j = 0; j < 32; j++) {
234 mdc(bus, 0); 131 mdc(bitbang, 0);
235 mii_delay(bus); 132 mii_delay(bitbang);
236 mdc(bus, 1); 133 mdc(bitbang, 1);
237 mii_delay(bus); 134 mii_delay(bitbang);
238 } 135 }
239 136
240 /* send the start bit (01) and the read opcode (10) or write (01) */ 137 /* send the start bit (01) and the read opcode (10) or write (01) */
241 mdc(bus, 0); 138 mdc(bitbang, 0);
242 mdio(bus, 0); 139 mdio(bitbang, 0);
243 mii_delay(bus); 140 mii_delay(bitbang);
244 mdc(bus, 1); 141 mdc(bitbang, 1);
245 mii_delay(bus); 142 mii_delay(bitbang);
246 mdc(bus, 0); 143 mdc(bitbang, 0);
247 mdio(bus, 1); 144 mdio(bitbang, 1);
248 mii_delay(bus); 145 mii_delay(bitbang);
249 mdc(bus, 1); 146 mdc(bitbang, 1);
250 mii_delay(bus); 147 mii_delay(bitbang);
251 mdc(bus, 0); 148 mdc(bitbang, 0);
252 mdio(bus, read); 149 mdio(bitbang, read);
253 mii_delay(bus); 150 mii_delay(bitbang);
254 mdc(bus, 1); 151 mdc(bitbang, 1);
255 mii_delay(bus); 152 mii_delay(bitbang);
256 mdc(bus, 0); 153 mdc(bitbang, 0);
257 mdio(bus, !read); 154 mdio(bitbang, !read);
258 mii_delay(bus); 155 mii_delay(bitbang);
259 mdc(bus, 1); 156 mdc(bitbang, 1);
260 mii_delay(bus); 157 mii_delay(bitbang);
261 158
262 /* send the PHY address */ 159 /* send the PHY address */
263 for (j = 0; j < 5; j++) { 160 for (j = 0; j < 5; j++) {
264 mdc(bus, 0); 161 mdc(bitbang, 0);
265 mdio(bus, (addr & 0x10) != 0); 162 mdio(bitbang, (addr & 0x10) != 0);
266 mii_delay(bus); 163 mii_delay(bitbang);
267 mdc(bus, 1); 164 mdc(bitbang, 1);
268 mii_delay(bus); 165 mii_delay(bitbang);
269 addr <<= 1; 166 addr <<= 1;
270 } 167 }
271 168
272 /* send the register address */ 169 /* send the register address */
273 for (j = 0; j < 5; j++) { 170 for (j = 0; j < 5; j++) {
274 mdc(bus, 0); 171 mdc(bitbang, 0);
275 mdio(bus, (reg & 0x10) != 0); 172 mdio(bitbang, (reg & 0x10) != 0);
276 mii_delay(bus); 173 mii_delay(bitbang);
277 mdc(bus, 1); 174 mdc(bitbang, 1);
278 mii_delay(bus); 175 mii_delay(bitbang);
279 reg <<= 1; 176 reg <<= 1;
280 } 177 }
281} 178}
282 179
283static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) 180static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location)
284{ 181{
285 u16 rdreg; 182 u16 rdreg;
286 int ret, j; 183 int ret, j;
287 u8 addr = phy_id & 0xff; 184 u8 addr = phy_id & 0xff;
288 u8 reg = location & 0xff; 185 u8 reg = location & 0xff;
186 struct bb_info* bitbang = bus->priv;
289 187
290 bitbang_pre(bus, 1, addr, reg); 188 bitbang_pre(bitbang, 1, addr, reg);
291 189
292 /* tri-state our MDIO I/O pin so we can read */ 190 /* tri-state our MDIO I/O pin so we can read */
293 mdc(bus, 0); 191 mdc(bitbang, 0);
294 mdio_tristate(bus); 192 mdio_tristate(bitbang);
295 mii_delay(bus); 193 mii_delay(bitbang);
296 mdc(bus, 1); 194 mdc(bitbang, 1);
297 mii_delay(bus); 195 mii_delay(bitbang);
298 196
299 /* check the turnaround bit: the PHY should be driving it to zero */ 197 /* check the turnaround bit: the PHY should be driving it to zero */
300 if (mdio_read(bus) != 0) { 198 if (mdio_read(bitbang) != 0) {
301 /* PHY didn't drive TA low */ 199 /* PHY didn't drive TA low */
302 for (j = 0; j < 32; j++) { 200 for (j = 0; j < 32; j++) {
303 mdc(bus, 0); 201 mdc(bitbang, 0);
304 mii_delay(bus); 202 mii_delay(bitbang);
305 mdc(bus, 1); 203 mdc(bitbang, 1);
306 mii_delay(bus); 204 mii_delay(bitbang);
307 } 205 }
308 ret = -1; 206 ret = -1;
309 goto out; 207 goto out;
310 } 208 }
311 209
312 mdc(bus, 0); 210 mdc(bitbang, 0);
313 mii_delay(bus); 211 mii_delay(bitbang);
314 212
315 /* read 16 bits of register data, MSB first */ 213 /* read 16 bits of register data, MSB first */
316 rdreg = 0; 214 rdreg = 0;
317 for (j = 0; j < 16; j++) { 215 for (j = 0; j < 16; j++) {
318 mdc(bus, 1); 216 mdc(bitbang, 1);
319 mii_delay(bus); 217 mii_delay(bitbang);
320 rdreg <<= 1; 218 rdreg <<= 1;
321 rdreg |= mdio_read(bus); 219 rdreg |= mdio_read(bitbang);
322 mdc(bus, 0); 220 mdc(bitbang, 0);
323 mii_delay(bus); 221 mii_delay(bitbang);
324 } 222 }
325 223
326 mdc(bus, 1); 224 mdc(bitbang, 1);
327 mii_delay(bus); 225 mii_delay(bitbang);
328 mdc(bus, 0); 226 mdc(bitbang, 0);
329 mii_delay(bus); 227 mii_delay(bitbang);
330 mdc(bus, 1); 228 mdc(bitbang, 1);
331 mii_delay(bus); 229 mii_delay(bitbang);
332 230
333 ret = rdreg; 231 ret = rdreg;
334out: 232out:
335 return ret; 233 return ret;
336} 234}
337 235
338static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) 236static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val)
339{ 237{
340 int j; 238 int j;
239 struct bb_info* bitbang = bus->priv;
240
341 u8 addr = phy_id & 0xff; 241 u8 addr = phy_id & 0xff;
342 u8 reg = location & 0xff; 242 u8 reg = location & 0xff;
343 u16 value = val & 0xffff; 243 u16 value = val & 0xffff;
344 244
345 bitbang_pre(bus, 0, addr, reg); 245 bitbang_pre(bitbang, 0, addr, reg);
346 246
347 /* send the turnaround (10) */ 247 /* send the turnaround (10) */
348 mdc(bus, 0); 248 mdc(bitbang, 0);
349 mdio(bus, 1); 249 mdio(bitbang, 1);
350 mii_delay(bus); 250 mii_delay(bitbang);
351 mdc(bus, 1); 251 mdc(bitbang, 1);
352 mii_delay(bus); 252 mii_delay(bitbang);
353 mdc(bus, 0); 253 mdc(bitbang, 0);
354 mdio(bus, 0); 254 mdio(bitbang, 0);
355 mii_delay(bus); 255 mii_delay(bitbang);
356 mdc(bus, 1); 256 mdc(bitbang, 1);
357 mii_delay(bus); 257 mii_delay(bitbang);
358 258
359 /* write 16 bits of register data, MSB first */ 259 /* write 16 bits of register data, MSB first */
360 for (j = 0; j < 16; j++) { 260 for (j = 0; j < 16; j++) {
361 mdc(bus, 0); 261 mdc(bitbang, 0);
362 mdio(bus, (value & 0x8000) != 0); 262 mdio(bitbang, (value & 0x8000) != 0);
363 mii_delay(bus); 263 mii_delay(bitbang);
364 mdc(bus, 1); 264 mdc(bitbang, 1);
365 mii_delay(bus); 265 mii_delay(bitbang);
366 value <<= 1; 266 value <<= 1;
367 } 267 }
368 268
369 /* 269 /*
370 * Tri-state the MDIO line. 270 * Tri-state the MDIO line.
371 */ 271 */
372 mdio_tristate(bus); 272 mdio_tristate(bitbang);
373 mdc(bus, 0); 273 mdc(bitbang, 0);
374 mii_delay(bus); 274 mii_delay(bitbang);
375 mdc(bus, 1); 275 mdc(bitbang, 1);
376 mii_delay(bus); 276 mii_delay(bitbang);
277 return 0;
377} 278}
378 279
379int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus) 280static int fs_enet_mii_bb_reset(struct mii_bus *bus)
281{
282 /* nothing here - no known way to reset this bus */
283 return 0;
284}
285
286static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi)
380{ 287{
381 const struct fs_mii_bus_info *bi = bus->bus_info;
382 int r; 288 int r;
383 289
384 r = bitbang_prep_bit(&bus->bitbang.mdio_dir, 290 bitbang->delay = fmpi->delay;
385 &bus->bitbang.mdio_dat, 291
386 &bus->bitbang.mdio_msk, 292 r = bitbang_prep_bit(&bitbang->mdio_dir,
387 bi->i.bitbang.mdio_port, 293 &bitbang->mdio_dir_msk,
388 bi->i.bitbang.mdio_bit); 294 &fmpi->mdio_dir);
389 if (r != 0) 295 if (r != 0)
390 return r; 296 return r;
391 297
392 r = bitbang_prep_bit(&bus->bitbang.mdc_dir, 298 r = bitbang_prep_bit(&bitbang->mdio_dat,
393 &bus->bitbang.mdc_dat, 299 &bitbang->mdio_dat_msk,
394 &bus->bitbang.mdc_msk, 300 &fmpi->mdio_dat);
395 bi->i.bitbang.mdc_port,
396 bi->i.bitbang.mdc_bit);
397 if (r != 0) 301 if (r != 0)
398 return r; 302 return r;
399 303
400 bus->mii_read = mii_read; 304 r = bitbang_prep_bit(&bitbang->mdc_dat,
401 bus->mii_write = mii_write; 305 &bitbang->mdc_msk,
306 &fmpi->mdc_dat);
307 if (r != 0)
308 return r;
402 309
403 return 0; 310 return 0;
404} 311}
312
313
314static int __devinit fs_enet_mdio_probe(struct device *dev)
315{
316 struct platform_device *pdev = to_platform_device(dev);
317 struct fs_mii_bb_platform_info *pdata;
318 struct mii_bus *new_bus;
319 struct bb_info *bitbang;
320 int err = 0;
321
322 if (NULL == dev)
323 return -EINVAL;
324
325 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
326
327 if (NULL == new_bus)
328 return -ENOMEM;
329
330 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
331
332 if (NULL == bitbang)
333 return -ENOMEM;
334
335 new_bus->name = "BB MII Bus",
336 new_bus->read = &fs_enet_mii_bb_read,
337 new_bus->write = &fs_enet_mii_bb_write,
338 new_bus->reset = &fs_enet_mii_bb_reset,
339 new_bus->id = pdev->id;
340
341 new_bus->phy_mask = ~0x9;
342 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
343
344 if (NULL == pdata) {
345 printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id);
346 return -ENODEV;
347 }
348
349 /*set up workspace*/
350 fs_mii_bitbang_init(bitbang, pdata);
351
352 new_bus->priv = bitbang;
353
354 new_bus->irq = pdata->irq;
355
356 new_bus->dev = dev;
357 dev_set_drvdata(dev, new_bus);
358
359 err = mdiobus_register(new_bus);
360
361 if (0 != err) {
362 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
363 new_bus->name);
364 goto bus_register_fail;
365 }
366
367 return 0;
368
369bus_register_fail:
370 kfree(bitbang);
371 kfree(new_bus);
372
373 return err;
374}
375
376
377static int fs_enet_mdio_remove(struct device *dev)
378{
379 struct mii_bus *bus = dev_get_drvdata(dev);
380
381 mdiobus_unregister(bus);
382
383 dev_set_drvdata(dev, NULL);
384
385 kfree(bus->priv);
386 bus->priv = NULL;
387 kfree(bus);
388
389 return 0;
390}
391
392static struct device_driver fs_enet_bb_mdio_driver = {
393 .name = "fsl-bb-mdio",
394 .bus = &platform_bus_type,
395 .probe = fs_enet_mdio_probe,
396 .remove = fs_enet_mdio_remove,
397};
398
399int fs_enet_mdio_bb_init(void)
400{
401 return driver_register(&fs_enet_bb_mdio_driver);
402}
403
404void fs_enet_mdio_bb_exit(void)
405{
406 driver_unregister(&fs_enet_bb_mdio_driver);
407}
408
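
bitbang_pre() and the two transfer routines above clock out a standard IEEE 802.3 clause-22 management frame on two GPIO pins: 32 preamble ones, a 01 start, a 10 (read) or 01 (write) opcode, five PHY-address bits, five register bits, a turnaround, then sixteen data bits, one MDC pulse pair per bit. The 14 header bits that follow the preamble can also be assembled as a single word; a runnable sketch for reference (helper name illustrative, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the header bits a clause-22 MDIO read shifts out
 * after the preamble, MSB first -- the same sequence bitbang_pre()
 * above produces one edge at a time. */
static uint16_t mdio_c22_read_header(uint8_t phy, uint8_t reg)
{
	return (0x1 << 12)		/* start: 01           */
	     | (0x2 << 10)		/* opcode: 10 == read  */
	     | ((phy & 0x1f) << 5)	/* 5 PHY address bits  */
	     | (reg & 0x1f);		/* 5 register bits     */
}

int main(void)
{
	/* read register 1 (BMSR) of the PHY at address 0 -> 0x1801 */
	printf("header = %#06x\n", mdio_c22_read_header(0, 1));
	return 0;
}
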
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
new file mode 100644
index 000000000000..1328e10caa35
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -0,0 +1,243 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37#include <linux/platform_device.h>
38
39#include <asm/pgtable.h>
40#include <asm/irq.h>
41#include <asm/uaccess.h>
42
43#include "fs_enet.h"
44#include "fec.h"
45
46/* Make MII read/write commands for the FEC.
47*/
48#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
49#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
50#define mk_mii_end 0
51
52#define FEC_MII_LOOPS 10000
53
54static int match_has_phy (struct device *dev, void* data)
55{
56 struct platform_device* pdev = container_of(dev, struct platform_device, dev);
57 struct fs_platform_info* fpi;
58 if(strcmp(pdev->name, (char*)data))
59 {
60 return 0;
61 }
62
63 fpi = pdev->dev.platform_data;
64 if((fpi)&&(fpi->has_phy))
65 return 1;
66 return 0;
67}
68
69static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi)
70{
71 struct resource *r;
72 fec_t *fecp;
73 char* name = "fsl-cpm-fec";
74
75 /* we need fec in order to be useful */
76 struct platform_device *fec_pdev =
77 container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy),
78 struct platform_device, dev);
79
80 if(fec_pdev == NULL) {
81 printk(KERN_ERR"Unable to find PHY for %s", name);
82 return -ENODEV;
83 }
84
85 r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs");
86
87 fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t));
88 fec->mii_speed = fmpi->mii_speed;
89
90 setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
91 setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
92 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
93 out_be32(&fecp->fec_mii_speed, fec->mii_speed);
94
95 return 0;
96}
97
98static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
99{
100 struct fec_info* fec = bus->priv;
101 fec_t *fecp = fec->fecp;
102 int i, ret = -1;
103
104 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
105 BUG();
106
107 /* Add PHY address to register command. */
108 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
109
110 for (i = 0; i < FEC_MII_LOOPS; i++)
111 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
112 break;
113
114 if (i < FEC_MII_LOOPS) {
115 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
116 ret = in_be32(&fecp->fec_mii_data) & 0xffff;
117 }
118
119 return ret;
120
121}
122
123static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
124{
125 struct fec_info* fec = bus->priv;
126 fec_t *fecp = fec->fecp;
127 int i;
128
129 /* this must never happen */
130 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
131 BUG();
132
133 /* Add PHY address to register command. */
134 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
135
136 for (i = 0; i < FEC_MII_LOOPS; i++)
137 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
138 break;
139
140 if (i < FEC_MII_LOOPS)
141 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
142
143 return 0;
144
145}
146
147static int fs_enet_fec_mii_reset(struct mii_bus *bus)
148{
149 /* nothing here - for now */
150 return 0;
151}
152
153static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
154{
155 struct platform_device *pdev = to_platform_device(dev);
156 struct fs_mii_fec_platform_info *pdata;
157 struct mii_bus *new_bus;
158 struct fec_info *fec;
159 int err = 0;
160 if (NULL == dev)
161 return -EINVAL;
162 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
163
164 if (NULL == new_bus)
165 return -ENOMEM;
166
167 fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
168
169 if (NULL == fec)
170 return -ENOMEM;
171
172 new_bus->name = "FEC MII Bus",
173 new_bus->read = &fs_enet_fec_mii_read,
174 new_bus->write = &fs_enet_fec_mii_write,
175 new_bus->reset = &fs_enet_fec_mii_reset,
176 new_bus->id = pdev->id;
177
178 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
179
180 if (NULL == pdata) {
181 printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id);
182 return -ENODEV;
183 }
184
185 /*set up workspace*/
186
187 fs_mii_fec_init(fec, pdata);
188 new_bus->priv = fec;
189
190 new_bus->irq = pdata->irq;
191
192 new_bus->dev = dev;
193 dev_set_drvdata(dev, new_bus);
194
195 err = mdiobus_register(new_bus);
196
197 if (0 != err) {
198 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
199 new_bus->name);
200 goto bus_register_fail;
201 }
202
203 return 0;
204
205bus_register_fail:
206 kfree(new_bus);
207
208 return err;
209}
210
211
212static int fs_enet_fec_mdio_remove(struct device *dev)
213{
214 struct mii_bus *bus = dev_get_drvdata(dev);
215
216 mdiobus_unregister(bus);
217
218 dev_set_drvdata(dev, NULL);
219 kfree(bus->priv);
220
221 bus->priv = NULL;
222 kfree(bus);
223
224 return 0;
225}
226
227static struct device_driver fs_enet_fec_mdio_driver = {
228 .name = "fsl-cpm-fec-mdio",
229 .bus = &platform_bus_type,
230 .probe = fs_enet_fec_mdio_probe,
231 .remove = fs_enet_fec_mdio_remove,
232};
233
234int fs_enet_mdio_fec_init(void)
235{
236 return driver_register(&fs_enet_fec_mdio_driver);
237}
238
239void fs_enet_mdio_fec_exit(void)
240{
241 driver_unregister(&fs_enet_fec_mdio_driver);
242}
243
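
The mk_mii_read()/mk_mii_write() macros at the top of this file pack the same clause-22 frame into the FEC's 32-bit mii_data register: start 01 and the opcode in bits 31:28, the PHY address ORed in at bit 23 by the callers, the register number at bit 18, a 10 turnaround at bits 17:16, and write data in the low 16 bits. Completion is signalled by FEC_ENET_MII in the ievent register, which the read and write paths poll for at most FEC_MII_LOOPS iterations. A condensed sketch of that polling pattern with the MMIO accessors stubbed out (read_ievent()/ack_ievent() are illustrative stand-ins, not kernel API, and the event bit value is assumed):

#define FEC_MII_LOOPS	10000
#define FEC_ENET_MII	0x00800000	/* MII-done event bit, assumed value */

extern unsigned int read_ievent(void);		/* stand-in for in_be32()  */
extern void ack_ievent(unsigned int mask);	/* stand-in for out_be32() */

static int wait_mii_done(void)
{
	int i;

	/* spin until the controller raises the MII event or we give up */
	for (i = 0; i < FEC_MII_LOOPS; i++)
		if (read_ievent() & FEC_ENET_MII)
			break;

	if (i == FEC_MII_LOOPS)
		return -1;			/* timed out */

	ack_ievent(FEC_ENET_MII);		/* write-1-to-clear the event */
	return 0;
}
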
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
deleted file mode 100644
index ae4a9c3bb393..000000000000
--- a/drivers/net/fs_enet/mii-fixed.c
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36
37#include <asm/pgtable.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#include "fs_enet.h"
42
43static const u16 mii_regs[7] = {
44 0x3100,
45 0x786d,
46 0x0fff,
47 0x0fff,
48 0x01e1,
49 0x45e1,
50 0x0003,
51};
52
53static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
54{
55 int ret = 0;
56
57 if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
58 return -1;
59
60 if (location != 5)
61 ret = mii_regs[location];
62 else
63 ret = bus->fixed.lpa;
64
65 return ret;
66}
67
68static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
69{
70 /* do nothing */
71}
72
73int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
74{
75 const struct fs_mii_bus_info *bi = bus->bus_info;
76
77 bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
78
79 /* if speed is fixed at 10Mb, remove 100Mb modes */
80 if (bi->i.fixed.speed == 10)
81 bus->fixed.lpa &= ~LPA_100;
82
83 /* if duplex is half, remove full duplex modes */
84 if (bi->i.fixed.duplex == 0)
85 bus->fixed.lpa &= ~LPA_DUPLEX;
86
87 bus->mii_read = mii_read;
88 bus->mii_write = mii_write;
89
90 return 0;
91}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index ebbbd6ca6204..5130da094305 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1708,9 +1708,6 @@ static void gfar_set_multi(struct net_device *dev)
1708 u32 tempval; 1708 u32 tempval;
1709 1709
1710 if(dev->flags & IFF_PROMISC) { 1710 if(dev->flags & IFF_PROMISC) {
1711 if (netif_msg_drv(priv))
1712 printk(KERN_INFO "%s: Entering promiscuous mode.\n",
1713 dev->name);
1714 /* Set RCTRL to PROM */ 1711 /* Set RCTRL to PROM */
1715 tempval = gfar_read(&regs->rctrl); 1712 tempval = gfar_read(&regs->rctrl);
1716 tempval |= RCTRL_PROM; 1713 tempval |= RCTRL_PROM;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 409c6aab0411..9927bff75d6f 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -27,8 +27,8 @@
27*/ 27*/
28 28
29#define DRV_NAME "hamachi" 29#define DRV_NAME "hamachi"
30#define DRV_VERSION "2.0" 30#define DRV_VERSION "2.1"
31#define DRV_RELDATE "June 27, 2006" 31#define DRV_RELDATE "Sept 11, 2006"
32 32
33 33
34/* A few user-configurable values. */ 34/* A few user-configurable values. */
@@ -1851,8 +1851,6 @@ static void set_rx_mode(struct net_device *dev)
1851 void __iomem *ioaddr = hmp->base; 1851 void __iomem *ioaddr = hmp->base;
1852 1852
1853 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1853 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1854 /* Unconditionally log net taps. */
1855 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1856 writew(0x000F, ioaddr + AddrMode); 1854 writew(0x000F, ioaddr + AddrMode);
1857 } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) { 1855 } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) {
1858 /* Too many to match, or accept all multicasts. */ 1856 /* Too many to match, or accept all multicasts. */
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index e7d9bf330287..ff5a67d619bb 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -111,7 +111,6 @@
111#include <linux/etherdevice.h> 111#include <linux/etherdevice.h>
112#include <linux/skbuff.h> 112#include <linux/skbuff.h>
113#include <linux/types.h> 113#include <linux/types.h>
114#include <linux/config.h> /* for CONFIG_PCI */
115#include <linux/delay.h> 114#include <linux/delay.h>
116#include <linux/init.h> 115#include <linux/init.h>
117#include <linux/bitops.h> 116#include <linux/bitops.h>
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 68d8af7df08e..fbda7614d0ef 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -28,7 +28,7 @@
28 */ 28 */
29 29
30#define IOC3_NAME "ioc3-eth" 30#define IOC3_NAME "ioc3-eth"
31#define IOC3_VERSION "2.6.3-3" 31#define IOC3_VERSION "2.6.3-4"
32 32
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/delay.h> 34#include <linux/delay.h>
@@ -1611,8 +1611,6 @@ static void ioc3_set_multicast_list(struct net_device *dev)
1611 netif_stop_queue(dev); /* Lock out others. */ 1611 netif_stop_queue(dev); /* Lock out others. */
1612 1612
1613 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1613 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1614 /* Unconditionally log net taps. */
1615 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1616 ip->emcr |= EMCR_PROMISC; 1614 ip->emcr |= EMCR_PROMISC;
1617 ioc3_w_emcr(ip->emcr); 1615 ioc3_w_emcr(ip->emcr);
1618 (void) ioc3_r_emcr(); 1616 (void) ioc3_r_emcr();
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 47f6f64d604c..415ba8dc94ce 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -45,7 +45,6 @@
45 45
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/moduleparam.h> 47#include <linux/moduleparam.h>
48#include <linux/config.h>
49#include <linux/kernel.h> 48#include <linux/kernel.h>
50#include <linux/types.h> 49#include <linux/types.h>
51#include <linux/errno.h> 50#include <linux/errno.h>
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 0ea65c4c6f85..b69776e00951 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -40,7 +40,6 @@
40 ********************************************************************/ 40 ********************************************************************/
41 41
42#include <linux/module.h> 42#include <linux/module.h>
43#include <linux/config.h>
44#include <linux/kernel.h> 43#include <linux/kernel.h>
45#include <linux/types.h> 44#include <linux/types.h>
46#include <linux/skbuff.h> 45#include <linux/skbuff.h>
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 82b67af54c94..a51604b3651f 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -110,9 +110,6 @@ struct ixgb_adapter;
110#define IXGB_RXBUFFER_8192 8192 110#define IXGB_RXBUFFER_8192 8192
111#define IXGB_RXBUFFER_16384 16384 111#define IXGB_RXBUFFER_16384 16384
112 112
113/* How many Tx Descriptors do we need to call netif_wake_queue? */
114#define IXGB_TX_QUEUE_WAKE 16
115
116/* How many Rx Buffers do we bundle into one write to the hardware ? */ 113/* How many Rx Buffers do we bundle into one write to the hardware ? */
117#define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */ 114#define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */
118 115
@@ -173,7 +170,7 @@ struct ixgb_adapter {
173 unsigned long led_status; 170 unsigned long led_status;
174 171
175 /* TX */ 172 /* TX */
176 struct ixgb_desc_ring tx_ring; 173 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
177 unsigned long timeo_start; 174 unsigned long timeo_start;
178 uint32_t tx_cmd_type; 175 uint32_t tx_cmd_type;
179 uint64_t hw_csum_tx_good; 176 uint64_t hw_csum_tx_good;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cf19b898ba9b..ba621083830a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -654,11 +654,7 @@ ixgb_phys_id(struct net_device *netdev, uint32_t data)
654 654
655 mod_timer(&adapter->blink_timer, jiffies); 655 mod_timer(&adapter->blink_timer, jiffies);
656 656
657 if (data) 657 msleep_interruptible(data * 1000);
658 schedule_timeout_interruptible(data * HZ);
659 else
660 schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
661
662 del_timer_sync(&adapter->blink_timer); 658 del_timer_sync(&adapter->blink_timer);
663 ixgb_led_off(&adapter->hw); 659 ixgb_led_off(&adapter->hw);
664 clear_bit(IXGB_LED_ON, &adapter->led_status); 660 clear_bit(IXGB_LED_ON, &adapter->led_status);
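
The LED-identify path loses its two schedule_timeout_interruptible() branches: msleep_interruptible() sleeps the requested number of seconds and still returns early if the user interrupts it (for example with ^C on "ethtool -p"). An outline of the resulting pattern, using the driver symbols from the hunk above (a sketch, not the full function):

/* Sketch: blink the identify LED for 'seconds' seconds, then restore. */
static int phys_id_sketch(struct net_device *netdev, uint32_t seconds)
{
	struct ixgb_adapter *adapter = netdev_priv(netdev);

	mod_timer(&adapter->blink_timer, jiffies);	/* start toggling   */
	msleep_interruptible(seconds * 1000);		/* bounded sleep    */
	del_timer_sync(&adapter->blink_timer);		/* stop the toggler */
	ixgb_led_off(&adapter->hw);
	clear_bit(IXGB_LED_ON, &adapter->led_status);

	return 0;
}
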
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index f7fa10e47fa2..2b1515574faf 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -236,6 +236,17 @@ ixgb_identify_phy(struct ixgb_hw *hw)
236 DEBUGOUT("Identified G6104 optics\n"); 236 DEBUGOUT("Identified G6104 optics\n");
237 phy_type = ixgb_phy_type_g6104; 237 phy_type = ixgb_phy_type_g6104;
238 break; 238 break;
239 case IXGB_DEVICE_ID_82597EX_CX4:
240 DEBUGOUT("Identified CX4\n");
241 xpak_vendor = ixgb_identify_xpak_vendor(hw);
242 if (xpak_vendor == ixgb_xpak_vendor_intel) {
243 DEBUGOUT("Identified TXN17201 optics\n");
244 phy_type = ixgb_phy_type_txn17201;
245 } else {
246 DEBUGOUT("Identified G6005 optics\n");
247 phy_type = ixgb_phy_type_g6005;
248 }
249 break;
239 default: 250 default:
240 DEBUGOUT("Unknown physical layer module\n"); 251 DEBUGOUT("Unknown physical layer module\n");
241 phy_type = ixgb_phy_type_unknown; 252 phy_type = ixgb_phy_type_unknown;
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
index 40a085f94c7b..9fd61189b4b2 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -45,6 +45,7 @@
45 45
46#define IXGB_DEVICE_ID_82597EX_CX4 0x109E 46#define IXGB_DEVICE_ID_82597EX_CX4 0x109E
47#define IXGB_SUBDEVICE_ID_A00C 0xA00C 47#define IXGB_SUBDEVICE_ID_A00C 0xA00C
48#define IXGB_SUBDEVICE_ID_A01C 0xA01C
48 49
49#endif /* #ifndef _IXGB_IDS_H_ */ 50#endif /* #ifndef _IXGB_IDS_H_ */
50/* End of File */ 51/* End of File */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 7bbd447289b5..e36dee1dd333 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
36#else 36#else
37#define DRIVERNAPI "-NAPI" 37#define DRIVERNAPI "-NAPI"
38#endif 38#endif
39#define DRV_VERSION "1.0.109-k2"DRIVERNAPI 39#define DRV_VERSION "1.0.112-k2"DRIVERNAPI
40char ixgb_driver_version[] = DRV_VERSION; 40char ixgb_driver_version[] = DRV_VERSION;
41static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 41static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
42 42
@@ -118,15 +118,26 @@ static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
118static void ixgb_netpoll(struct net_device *dev); 118static void ixgb_netpoll(struct net_device *dev);
119#endif 119#endif
120 120
121/* Exported from other modules */ 121static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
122 enum pci_channel_state state);
123static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
124static void ixgb_io_resume (struct pci_dev *pdev);
122 125
126/* Exported from other modules */
123extern void ixgb_check_options(struct ixgb_adapter *adapter); 127extern void ixgb_check_options(struct ixgb_adapter *adapter);
124 128
129static struct pci_error_handlers ixgb_err_handler = {
130 .error_detected = ixgb_io_error_detected,
131 .slot_reset = ixgb_io_slot_reset,
132 .resume = ixgb_io_resume,
133};
134
125static struct pci_driver ixgb_driver = { 135static struct pci_driver ixgb_driver = {
126 .name = ixgb_driver_name, 136 .name = ixgb_driver_name,
127 .id_table = ixgb_pci_tbl, 137 .id_table = ixgb_pci_tbl,
128 .probe = ixgb_probe, 138 .probe = ixgb_probe,
129 .remove = __devexit_p(ixgb_remove), 139 .remove = __devexit_p(ixgb_remove),
140 .err_handler = &ixgb_err_handler
130}; 141};
131 142
132MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 143MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -140,12 +151,12 @@ module_param(debug, int, 0);
140MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 151MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
141 152
142/* some defines for controlling descriptor fetches in h/w */ 153/* some defines for controlling descriptor fetches in h/w */
143#define RXDCTL_WTHRESH_DEFAULT 16 /* chip writes back at this many or RXT0 */ 154#define RXDCTL_WTHRESH_DEFAULT 15 /* chip writes back at this many or RXT0 */
144#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefetch below 155#define RXDCTL_PTHRESH_DEFAULT 0 /* chip considers prefetch below
145 * this */ 156 * this */
146#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail 157#define RXDCTL_HTHRESH_DEFAULT 0 /* chip will only prefetch if tail
147 * is pushed this many descriptors 158 * is pushed this many descriptors
148 * from head */ 159 * from head */
149 160
150/** 161/**
151 * ixgb_init_module - Driver Registration Routine 162 * ixgb_init_module - Driver Registration Routine
@@ -162,7 +173,7 @@ ixgb_init_module(void)
162 173
163 printk(KERN_INFO "%s\n", ixgb_copyright); 174 printk(KERN_INFO "%s\n", ixgb_copyright);
164 175
165 return pci_module_init(&ixgb_driver); 176 return pci_register_driver(&ixgb_driver);
166} 177}
167 178
168module_init(ixgb_init_module); 179module_init(ixgb_init_module);
@@ -1174,6 +1185,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1174 int err; 1185 int err;
1175 1186
1176 if (likely(skb_is_gso(skb))) { 1187 if (likely(skb_is_gso(skb))) {
1188 struct ixgb_buffer *buffer_info;
1177 if (skb_header_cloned(skb)) { 1189 if (skb_header_cloned(skb)) {
1178 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1190 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1179 if (err) 1191 if (err)
@@ -1196,6 +1208,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1196 1208
1197 i = adapter->tx_ring.next_to_use; 1209 i = adapter->tx_ring.next_to_use;
1198 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); 1210 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1211 buffer_info = &adapter->tx_ring.buffer_info[i];
1212 WARN_ON(buffer_info->dma != 0);
1199 1213
1200 context_desc->ipcss = ipcss; 1214 context_desc->ipcss = ipcss;
1201 context_desc->ipcso = ipcso; 1215 context_desc->ipcso = ipcso;
@@ -1233,11 +1247,14 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1233 uint8_t css, cso; 1247 uint8_t css, cso;
1234 1248
1235 if(likely(skb->ip_summed == CHECKSUM_HW)) { 1249 if(likely(skb->ip_summed == CHECKSUM_HW)) {
1250 struct ixgb_buffer *buffer_info;
1236 css = skb->h.raw - skb->data; 1251 css = skb->h.raw - skb->data;
1237 cso = (skb->h.raw + skb->csum) - skb->data; 1252 cso = (skb->h.raw + skb->csum) - skb->data;
1238 1253
1239 i = adapter->tx_ring.next_to_use; 1254 i = adapter->tx_ring.next_to_use;
1240 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i); 1255 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1256 buffer_info = &adapter->tx_ring.buffer_info[i];
1257 WARN_ON(buffer_info->dma != 0);
1241 1258
1242 context_desc->tucss = css; 1259 context_desc->tucss = css;
1243 context_desc->tucso = cso; 1260 context_desc->tucso = cso;
@@ -1283,6 +1300,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1283 buffer_info = &tx_ring->buffer_info[i]; 1300 buffer_info = &tx_ring->buffer_info[i];
1284 size = min(len, IXGB_MAX_DATA_PER_TXD); 1301 size = min(len, IXGB_MAX_DATA_PER_TXD);
1285 buffer_info->length = size; 1302 buffer_info->length = size;
1303 WARN_ON(buffer_info->dma != 0);
1286 buffer_info->dma = 1304 buffer_info->dma =
1287 pci_map_single(adapter->pdev, 1305 pci_map_single(adapter->pdev,
1288 skb->data + offset, 1306 skb->data + offset,
@@ -1543,6 +1561,11 @@ void
1543ixgb_update_stats(struct ixgb_adapter *adapter) 1561ixgb_update_stats(struct ixgb_adapter *adapter)
1544{ 1562{
1545 struct net_device *netdev = adapter->netdev; 1563 struct net_device *netdev = adapter->netdev;
1564 struct pci_dev *pdev = adapter->pdev;
1565
1566 /* Prevent stats update while adapter is being reset */
1567 if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
1568 return;
1546 1569
1547 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) || 1570 if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1548 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) { 1571 (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
@@ -1787,7 +1810,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1787 if (unlikely(netif_queue_stopped(netdev))) { 1810 if (unlikely(netif_queue_stopped(netdev))) {
1788 spin_lock(&adapter->tx_lock); 1811 spin_lock(&adapter->tx_lock);
1789 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) && 1812 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
1790 (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) 1813 (IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED))
1791 netif_wake_queue(netdev); 1814 netif_wake_queue(netdev);
1792 spin_unlock(&adapter->tx_lock); 1815 spin_unlock(&adapter->tx_lock);
1793 } 1816 }
@@ -1948,10 +1971,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1948#define IXGB_CB_LENGTH 256 1971#define IXGB_CB_LENGTH 256
1949 if (length < IXGB_CB_LENGTH) { 1972 if (length < IXGB_CB_LENGTH) {
1950 struct sk_buff *new_skb = 1973 struct sk_buff *new_skb =
1951 dev_alloc_skb(length + NET_IP_ALIGN); 1974 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
1952 if (new_skb) { 1975 if (new_skb) {
1953 skb_reserve(new_skb, NET_IP_ALIGN); 1976 skb_reserve(new_skb, NET_IP_ALIGN);
1954 new_skb->dev = netdev;
1955 memcpy(new_skb->data - NET_IP_ALIGN, 1977 memcpy(new_skb->data - NET_IP_ALIGN,
1956 skb->data - NET_IP_ALIGN, 1978 skb->data - NET_IP_ALIGN,
1957 length + NET_IP_ALIGN); 1979 length + NET_IP_ALIGN);
@@ -2031,14 +2053,14 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2031 /* leave three descriptors unused */ 2053 /* leave three descriptors unused */
2032 while(--cleancount > 2) { 2054 while(--cleancount > 2) {
2033 /* recycle! its good for you */ 2055 /* recycle! its good for you */
2034 if (!(skb = buffer_info->skb)) 2056 skb = buffer_info->skb;
2035 skb = dev_alloc_skb(adapter->rx_buffer_len 2057 if (skb) {
2036 + NET_IP_ALIGN);
2037 else {
2038 skb_trim(skb, 0); 2058 skb_trim(skb, 0);
2039 goto map_skb; 2059 goto map_skb;
2040 } 2060 }
2041 2061
2062 skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len
2063 + NET_IP_ALIGN);
2042 if (unlikely(!skb)) { 2064 if (unlikely(!skb)) {
2043 /* Better luck next round */ 2065 /* Better luck next round */
2044 adapter->alloc_rx_buff_failed++; 2066 adapter->alloc_rx_buff_failed++;
@@ -2051,8 +2073,6 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2051 */ 2073 */
2052 skb_reserve(skb, NET_IP_ALIGN); 2074 skb_reserve(skb, NET_IP_ALIGN);
2053 2075
2054 skb->dev = netdev;
2055
2056 buffer_info->skb = skb; 2076 buffer_info->skb = skb;
2057 buffer_info->length = adapter->rx_buffer_len; 2077 buffer_info->length = adapter->rx_buffer_len;
2058map_skb: 2078map_skb:
@@ -2190,7 +2210,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2190 2210
2191static void ixgb_netpoll(struct net_device *dev) 2211static void ixgb_netpoll(struct net_device *dev)
2192{ 2212{
2193 struct ixgb_adapter *adapter = dev->priv; 2213 struct ixgb_adapter *adapter = netdev_priv(dev);
2194 2214
2195 disable_irq(adapter->pdev->irq); 2215 disable_irq(adapter->pdev->irq);
2196 ixgb_intr(adapter->pdev->irq, dev, NULL); 2216 ixgb_intr(adapter->pdev->irq, dev, NULL);
@@ -2198,4 +2218,98 @@ static void ixgb_netpoll(struct net_device *dev)
2198} 2218}
2199#endif 2219#endif
2200 2220
2221/**
2222 * ixgb_io_error_detected() - called when PCI error is detected
2223 * @pdev: pointer to pci device with error
2224 * @state: pci channel state after error
2225 *
2226 * This callback is called by the PCI subsystem whenever
2227 * a PCI bus error is detected.
2228 */
2229static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
2230 enum pci_channel_state state)
2231{
2232 struct net_device *netdev = pci_get_drvdata(pdev);
2233 struct ixgb_adapter *adapter = netdev->priv;
2234
2235 if(netif_running(netdev))
2236 ixgb_down(adapter, TRUE);
2237
2238 pci_disable_device(pdev);
2239
2240 /* Request a slot reset. */
2241 return PCI_ERS_RESULT_NEED_RESET;
2242}
2243
2244/**
2245 * ixgb_io_slot_reset - called after the pci bus has been reset.
2246 * @pdev: pointer to pci device with error
2247 *
2248 * This callback is called after the PCI bus has been reset.
2249 * Basically, this tries to restart the card from scratch.
2250 * This is a shortened version of the device probe/discovery code;
2251 * it resembles the first half of the ixgb_probe() routine.
2252 */
2253static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev)
2254{
2255 struct net_device *netdev = pci_get_drvdata(pdev);
2256 struct ixgb_adapter *adapter = netdev->priv;
2257
2258 if(pci_enable_device(pdev)) {
2259 DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
2260 return PCI_ERS_RESULT_DISCONNECT;
2261 }
2262
2263 /* Perform card reset only on one instance of the card */
2264 if (0 != PCI_FUNC (pdev->devfn))
2265 return PCI_ERS_RESULT_RECOVERED;
2266
2267 pci_set_master(pdev);
2268
2269 netif_carrier_off(netdev);
2270 netif_stop_queue(netdev);
2271 ixgb_reset(adapter);
2272
2273 /* Make sure the EEPROM is good */
2274 if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
2275 DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
2276 return PCI_ERS_RESULT_DISCONNECT;
2277 }
2278 ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
2279 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
2280
2281 if(!is_valid_ether_addr(netdev->perm_addr)) {
2282 DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
2283 return PCI_ERS_RESULT_DISCONNECT;
2284 }
2285
2286 return PCI_ERS_RESULT_RECOVERED;
2287}
2288
2289/**
2290 * ixgb_io_resume - called when it's OK to resume normal operations
2291 * @pdev: pointer to pci device with error
2292 *
2293 * The error recovery driver tells us that it's OK to resume
2294 * normal operation. Implementation resembles the second half
2295 * of the ixgb_probe() routine.
2296 */
2297static void ixgb_io_resume (struct pci_dev *pdev)
2298{
2299 struct net_device *netdev = pci_get_drvdata(pdev);
2300 struct ixgb_adapter *adapter = netdev->priv;
2301
2302 pci_set_master(pdev);
2303
2304 if(netif_running(netdev)) {
2305 if(ixgb_up(adapter)) {
2306 printk ("ixgb: can't bring device back up after reset\n");
2307 return;
2308 }
2309 }
2310
2311 netif_device_attach(netdev);
2312 mod_timer(&adapter->watchdog_timer, jiffies);
2313}
2314
2201/* ixgb_main.c */ 2315/* ixgb_main.c */
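
Together with the err_handler table added near the top of the file, the three callbacks above implement the standard PCI error-recovery contract: the core calls error_detected() when a channel fault is reported (the driver quiesces and requests a slot reset), slot_reset() once the bus has been reset (re-enable the device, reset the MAC, re-validate the EEPROM and MAC address), and resume() when traffic may restart. A minimal sketch of the wiring, assuming the handlers above (struct names are illustrative):

/* Minimal sketch: attach AER recovery callbacks to a pci_driver. */
static struct pci_error_handlers sketch_err_handler = {
	.error_detected	= ixgb_io_error_detected,	/* fault reported   */
	.slot_reset	= ixgb_io_slot_reset,		/* after bus reset  */
	.resume		= ixgb_io_resume,		/* traffic may flow */
};

static struct pci_driver sketch_driver = {
	.name		= "ixgb",
	.id_table	= ixgb_pci_tbl,
	.probe		= ixgb_probe,
	.remove		= __devexit_p(ixgb_remove),
	.err_handler	= &sketch_err_handler,
};
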
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index c1c3452c90ca..dc997be44ed7 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -42,7 +42,7 @@
42 Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004 42 Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
43*/ 43*/
44 44
45static const char version[] = "lance.c:v1.15ac 1999/11/13 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n"; 45static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
46 46
47#include <linux/module.h> 47#include <linux/module.h>
48#include <linux/kernel.h> 48#include <linux/kernel.h>
@@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
326MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); 326MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
327MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); 327MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
328 328
329int init_module(void) 329int __init init_module(void)
330{ 330{
331 struct net_device *dev; 331 struct net_device *dev;
332 int this_dev, found = 0; 332 int this_dev, found = 0;
@@ -1281,8 +1281,6 @@ static void set_multicast_list(struct net_device *dev)
1281 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */ 1281 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1282 1282
1283 if (dev->flags&IFF_PROMISC) { 1283 if (dev->flags&IFF_PROMISC) {
1284 /* Log any net taps. */
1285 printk("%s: Promiscuous mode enabled.\n", dev->name);
1286 outw(15, ioaddr+LANCE_ADDR); 1284 outw(15, ioaddr+LANCE_ADDR);
1287 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */ 1285 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1288 } else { 1286 } else {
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 646e89fc3562..c0ec7f6abcb2 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
406MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); 406MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
407MODULE_LICENSE("GPL"); 407MODULE_LICENSE("GPL");
408 408
409int init_module(void) 409int __init init_module(void)
410{ 410{
411 struct net_device *dev; 411 struct net_device *dev;
412 int this_dev, found = 0; 412 int this_dev, found = 0;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 07ca9480a6fe..b19e2034d11f 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -177,6 +177,7 @@ struct myri10ge_priv {
177 struct work_struct watchdog_work; 177 struct work_struct watchdog_work;
178 struct timer_list watchdog_timer; 178 struct timer_list watchdog_timer;
179 int watchdog_tx_done; 179 int watchdog_tx_done;
180 int watchdog_tx_req;
180 int watchdog_resets; 181 int watchdog_resets;
181 int tx_linearized; 182 int tx_linearized;
182 int pause; 183 int pause;
@@ -186,11 +187,14 @@ struct myri10ge_priv {
186 u8 mac_addr[6]; /* eeprom mac address */ 187 u8 mac_addr[6]; /* eeprom mac address */
187 unsigned long serial_number; 188 unsigned long serial_number;
188 int vendor_specific_offset; 189 int vendor_specific_offset;
190 int fw_multicast_support;
189 u32 devctl; 191 u32 devctl;
190 u16 msi_flags; 192 u16 msi_flags;
191 u32 read_dma; 193 u32 read_dma;
192 u32 write_dma; 194 u32 write_dma;
193 u32 read_write_dma; 195 u32 read_write_dma;
196 u32 link_changes;
197 u32 msg_enable;
194}; 198};
195 199
196static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat"; 200static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
@@ -256,6 +260,12 @@ module_param(myri10ge_max_irq_loops, int, S_IRUGO);
256MODULE_PARM_DESC(myri10ge_max_irq_loops, 260MODULE_PARM_DESC(myri10ge_max_irq_loops,
257 "Set stuck legacy IRQ detection threshold\n"); 261 "Set stuck legacy IRQ detection threshold\n");
258 262
263#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
264
265static int myri10ge_debug = -1; /* defaults above */
266module_param(myri10ge_debug, int, 0);
267MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
268
259#define MYRI10GE_FW_OFFSET 1024*1024 269#define MYRI10GE_FW_OFFSET 1024*1024
260#define MYRI10GE_HIGHPART_TO_U32(X) \ 270#define MYRI10GE_HIGHPART_TO_U32(X) \
261(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0) 271(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -270,7 +280,7 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
270 struct mcp_cmd *buf; 280 struct mcp_cmd *buf;
271 char buf_bytes[sizeof(*buf) + 8]; 281 char buf_bytes[sizeof(*buf) + 8];
272 struct mcp_cmd_response *response = mgp->cmd; 282 struct mcp_cmd_response *response = mgp->cmd;
273 char __iomem *cmd_addr = mgp->sram + MXGEFW_CMD_OFFSET; 283 char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
274 u32 dma_low, dma_high, result, value; 284 u32 dma_low, dma_high, result, value;
275 int sleep_total = 0; 285 int sleep_total = 0;
276 286
@@ -319,6 +329,8 @@ myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
319 if (result == 0) { 329 if (result == 0) {
320 data->data0 = value; 330 data->data0 = value;
321 return 0; 331 return 0;
332 } else if (result == MXGEFW_CMD_UNKNOWN) {
333 return -ENOSYS;
322 } else { 334 } else {
323 dev_err(&mgp->pdev->dev, 335 dev_err(&mgp->pdev->dev,
324 "command %d failed, result = %d\n", 336 "command %d failed, result = %d\n",
@@ -403,7 +415,7 @@ static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
403 buf[4] = htonl(dma_low); /* dummy addr LSW */ 415 buf[4] = htonl(dma_low); /* dummy addr LSW */
404 buf[5] = htonl(enable); /* enable? */ 416 buf[5] = htonl(enable); /* enable? */
405 417
406 submit = mgp->sram + 0xfc01c0; 418 submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
407 419
408 myri10ge_pio_copy(submit, &buf, sizeof(buf)); 420 myri10ge_pio_copy(submit, &buf, sizeof(buf));
409 for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++) 421 for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
@@ -448,6 +460,7 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
448 struct mcp_gen_header *hdr; 460 struct mcp_gen_header *hdr;
449 size_t hdr_offset; 461 size_t hdr_offset;
450 int status; 462 int status;
463 unsigned i;
451 464
452 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) { 465 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
453 dev_err(dev, "Unable to load %s firmware image via hotplug\n", 466 dev_err(dev, "Unable to load %s firmware image via hotplug\n",
@@ -479,18 +492,12 @@ static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
479 goto abort_with_fw; 492 goto abort_with_fw;
480 493
481 crc = crc32(~0, fw->data, fw->size); 494 crc = crc32(~0, fw->data, fw->size);
482 if (mgp->tx.boundary == 2048) { 495 for (i = 0; i < fw->size; i += 256) {
483 /* Avoid PCI burst on chipset with unaligned completions. */ 496 myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
484 int i; 497 fw->data + i,
485 __iomem u32 *ptr = (__iomem u32 *) (mgp->sram + 498 min(256U, (unsigned)(fw->size - i)));
486 MYRI10GE_FW_OFFSET); 499 mb();
487 for (i = 0; i < fw->size / 4; i++) { 500 readb(mgp->sram);
488 __raw_writel(((u32 *) fw->data)[i], ptr + i);
489 wmb();
490 }
491 } else {
492 myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data,
493 fw->size);
494 } 501 }
495 /* corruption checking is good for parity recovery and buggy chipset */ 502 /* corruption checking is good for parity recovery and buggy chipset */
496 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size); 503 memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
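
The rewritten download loop replaces the two chipset-specific copy paths with one: the image is pushed in 256-byte PIO bursts, and after each burst a memory barrier plus a readb() of the NIC's SRAM flushes any write-combining buffer, which keeps bursts small enough even for chipsets that split or reorder large writes. The pattern, annotated (this restates the hunk above, not new code):

/* Chunked PIO copy with a per-chunk flush, as in the hunk above. */
for (i = 0; i < fw->size; i += 256) {
	myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
			  fw->data + i,
			  min(256U, (unsigned)(fw->size - i)));
	mb();			/* order the burst before the flush read */
	readb(mgp->sram);	/* a read forces posted writes to land   */
}
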
@@ -604,7 +611,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
604 buf[5] = htonl(8); /* where to copy to */ 611 buf[5] = htonl(8); /* where to copy to */
605 buf[6] = htonl(0); /* where to jump to */ 612 buf[6] = htonl(0); /* where to jump to */
606 613
607 submit = mgp->sram + 0xfc0000; 614 submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
608 615
609 myri10ge_pio_copy(submit, &buf, sizeof(buf)); 616 myri10ge_pio_copy(submit, &buf, sizeof(buf));
610 mb(); 617 mb();
@@ -620,7 +627,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
620 return -ENXIO; 627 return -ENXIO;
621 } 628 }
622 dev_info(&mgp->pdev->dev, "handoff confirmed\n"); 629 dev_info(&mgp->pdev->dev, "handoff confirmed\n");
623 myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); 630 myri10ge_dummy_rdma(mgp, 1);
624 631
625 return 0; 632 return 0;
626} 633}
@@ -768,6 +775,7 @@ static int myri10ge_reset(struct myri10ge_priv *mgp)
768 mgp->rx_small.cnt = 0; 775 mgp->rx_small.cnt = 0;
769 mgp->rx_done.idx = 0; 776 mgp->rx_done.idx = 0;
770 mgp->rx_done.cnt = 0; 777 mgp->rx_done.cnt = 0;
778 mgp->link_changes = 0;
771 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr); 779 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
772 myri10ge_change_promisc(mgp, 0, 0); 780 myri10ge_change_promisc(mgp, 0, 0);
773 myri10ge_change_pause(mgp, mgp->pause); 781 myri10ge_change_pause(mgp, mgp->pause);
@@ -802,12 +810,13 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
802 * pages directly and building a fraglist in the near future. 810 * pages directly and building a fraglist in the near future.
803 */ 811 */
804 812
805static inline struct sk_buff *myri10ge_alloc_big(int bytes) 813static inline struct sk_buff *myri10ge_alloc_big(struct net_device *dev,
814 int bytes)
806{ 815{
807 struct sk_buff *skb; 816 struct sk_buff *skb;
808 unsigned long data, roundup; 817 unsigned long data, roundup;
809 818
810 skb = dev_alloc_skb(bytes + 4096 + MXGEFW_PAD); 819 skb = netdev_alloc_skb(dev, bytes + 4096 + MXGEFW_PAD);
811 if (skb == NULL) 820 if (skb == NULL)
812 return NULL; 821 return NULL;
813 822
@@ -825,12 +834,13 @@ static inline struct sk_buff *myri10ge_alloc_big(int bytes)
825 834
826/* Allocate 2x as much space as required and use whichever portion 835/* Allocate 2x as much space as required and use whichever portion
827 * does not cross a 4KB boundary */ 836 * does not cross a 4KB boundary */
828static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes) 837static inline struct sk_buff *myri10ge_alloc_small_safe(struct net_device *dev,
838 unsigned int bytes)
829{ 839{
830 struct sk_buff *skb; 840 struct sk_buff *skb;
831 unsigned long data, boundary; 841 unsigned long data, boundary;
832 842
833 skb = dev_alloc_skb(2 * (bytes + MXGEFW_PAD) - 1); 843 skb = netdev_alloc_skb(dev, 2 * (bytes + MXGEFW_PAD) - 1);
834 if (unlikely(skb == NULL)) 844 if (unlikely(skb == NULL))
835 return NULL; 845 return NULL;
836 846
@@ -851,12 +861,13 @@ static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
851 861
852/* Allocate just enough space, and verify that the allocated 862/* Allocate just enough space, and verify that the allocated
853 * space does not cross a 4KB boundary */ 863 * space does not cross a 4KB boundary */
854static inline struct sk_buff *myri10ge_alloc_small(int bytes) 864static inline struct sk_buff *myri10ge_alloc_small(struct net_device *dev,
865 int bytes)
855{ 866{
856 struct sk_buff *skb; 867 struct sk_buff *skb;
857 unsigned long roundup, data, end; 868 unsigned long roundup, data, end;
858 869
859 skb = dev_alloc_skb(bytes + 16 + MXGEFW_PAD); 870 skb = netdev_alloc_skb(dev, bytes + 16 + MXGEFW_PAD);
860 if (unlikely(skb == NULL)) 871 if (unlikely(skb == NULL))
861 return NULL; 872 return NULL;
862 873
@@ -872,15 +883,17 @@ static inline struct sk_buff *myri10ge_alloc_small(int bytes)
872 "myri10ge_alloc_small: small skb crossed 4KB boundary\n"); 883 "myri10ge_alloc_small: small skb crossed 4KB boundary\n");
873 myri10ge_skb_cross_4k = 1; 884 myri10ge_skb_cross_4k = 1;
874 dev_kfree_skb_any(skb); 885 dev_kfree_skb_any(skb);
875 skb = myri10ge_alloc_small_safe(bytes); 886 skb = myri10ge_alloc_small_safe(dev, bytes);
876 } 887 }
877 return skb; 888 return skb;
878} 889}
879 890
880static inline int 891static inline int
881myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes, 892myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct myri10ge_priv *mgp,
882 int idx) 893 int bytes, int idx)
883{ 894{
895 struct net_device *dev = mgp->dev;
896 struct pci_dev *pdev = mgp->pdev;
884 struct sk_buff *skb; 897 struct sk_buff *skb;
885 dma_addr_t bus; 898 dma_addr_t bus;
886 int len, retval = 0; 899 int len, retval = 0;
@@ -888,11 +901,11 @@ myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
888 bytes += VLAN_HLEN; /* account for 802.1q vlan tag */ 901 bytes += VLAN_HLEN; /* account for 802.1q vlan tag */
889 902
890 if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ ) 903 if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
891 skb = myri10ge_alloc_big(bytes); 904 skb = myri10ge_alloc_big(dev, bytes);
892 else if (myri10ge_skb_cross_4k) 905 else if (myri10ge_skb_cross_4k)
893 skb = myri10ge_alloc_small_safe(bytes); 906 skb = myri10ge_alloc_small_safe(dev, bytes);
894 else 907 else
895 skb = myri10ge_alloc_small(bytes); 908 skb = myri10ge_alloc_small(dev, bytes);
896 909
897 if (unlikely(skb == NULL)) { 910 if (unlikely(skb == NULL)) {
898 rx->alloc_fail++; 911 rx->alloc_fail++;
@@ -955,7 +968,7 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
955 unmap_len = pci_unmap_len(&rx->info[idx], len); 968 unmap_len = pci_unmap_len(&rx->info[idx], len);
956 969
957 /* try to replace the received skb */ 970 /* try to replace the received skb */
958 if (myri10ge_getbuf(rx, mgp->pdev, bytes, idx)) { 971 if (myri10ge_getbuf(rx, mgp, bytes, idx)) {
959 /* drop the frame -- the old skbuf is re-cycled */ 972 /* drop the frame -- the old skbuf is re-cycled */
960 mgp->stats.rx_dropped += 1; 973 mgp->stats.rx_dropped += 1;
961 return 0; 974 return 0;
@@ -972,7 +985,6 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
972 skb_put(skb, len); 985 skb_put(skb, len);
973 986
974 skb->protocol = eth_type_trans(skb, mgp->dev); 987 skb->protocol = eth_type_trans(skb, mgp->dev);
975 skb->dev = mgp->dev;
976 if (mgp->csum_flag) { 988 if (mgp->csum_flag) {
977 if ((skb->protocol == ntohs(ETH_P_IP)) || 989 if ((skb->protocol == ntohs(ETH_P_IP)) ||
978 (skb->protocol == ntohs(ETH_P_IPV6))) { 990 (skb->protocol == ntohs(ETH_P_IPV6))) {
@@ -1085,13 +1097,19 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1085 if (mgp->link_state != stats->link_up) { 1097 if (mgp->link_state != stats->link_up) {
1086 mgp->link_state = stats->link_up; 1098 mgp->link_state = stats->link_up;
1087 if (mgp->link_state) { 1099 if (mgp->link_state) {
1088 printk(KERN_INFO "myri10ge: %s: link up\n", 1100 if (netif_msg_link(mgp))
1089 mgp->dev->name); 1101 printk(KERN_INFO
1102 "myri10ge: %s: link up\n",
1103 mgp->dev->name);
1090 netif_carrier_on(mgp->dev); 1104 netif_carrier_on(mgp->dev);
1105 mgp->link_changes++;
1091 } else { 1106 } else {
1092 printk(KERN_INFO "myri10ge: %s: link down\n", 1107 if (netif_msg_link(mgp))
1093 mgp->dev->name); 1108 printk(KERN_INFO
1109 "myri10ge: %s: link down\n",
1110 mgp->dev->name);
1094 netif_carrier_off(mgp->dev); 1111 netif_carrier_off(mgp->dev);
1112 mgp->link_changes++;
1095 } 1113 }
1096 } 1114 }
1097 if (mgp->rdma_tags_available != 1115 if (mgp->rdma_tags_available !=
@@ -1293,7 +1311,8 @@ static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
1293 "serial_number", "tx_pkt_start", "tx_pkt_done", 1311 "serial_number", "tx_pkt_start", "tx_pkt_done",
1294 "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt", 1312 "tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
1295 "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized", 1313 "wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
1296 "link_up", "dropped_link_overflow", "dropped_link_error_or_filtered", 1314 "link_changes", "link_up", "dropped_link_overflow",
1315 "dropped_link_error_or_filtered", "dropped_multicast_filtered",
1297 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer", 1316 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
1298 "dropped_no_big_buffer" 1317 "dropped_no_big_buffer"
1299}; 1318};
@@ -1345,16 +1364,31 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1345 data[i++] = (unsigned int)mgp->stop_queue; 1364 data[i++] = (unsigned int)mgp->stop_queue;
1346 data[i++] = (unsigned int)mgp->watchdog_resets; 1365 data[i++] = (unsigned int)mgp->watchdog_resets;
1347 data[i++] = (unsigned int)mgp->tx_linearized; 1366 data[i++] = (unsigned int)mgp->tx_linearized;
1367 data[i++] = (unsigned int)mgp->link_changes;
1348 data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up); 1368 data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
1349 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow); 1369 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
1350 data[i++] = 1370 data[i++] =
1351 (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered); 1371 (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
1372 data[i++] =
1373 (unsigned int)ntohl(mgp->fw_stats->dropped_multicast_filtered);
1352 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt); 1374 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
1353 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun); 1375 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
1354 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer); 1376 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
1355 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer); 1377 data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
1356} 1378}
1357 1379
1380static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
1381{
1382 struct myri10ge_priv *mgp = netdev_priv(netdev);
1383 mgp->msg_enable = value;
1384}
1385
1386static u32 myri10ge_get_msglevel(struct net_device *netdev)
1387{
1388 struct myri10ge_priv *mgp = netdev_priv(netdev);
1389 return mgp->msg_enable;
1390}
1391
1358static struct ethtool_ops myri10ge_ethtool_ops = { 1392static struct ethtool_ops myri10ge_ethtool_ops = {
1359 .get_settings = myri10ge_get_settings, 1393 .get_settings = myri10ge_get_settings,
1360 .get_drvinfo = myri10ge_get_drvinfo, 1394 .get_drvinfo = myri10ge_get_drvinfo,
@@ -1375,7 +1409,9 @@ static struct ethtool_ops myri10ge_ethtool_ops = {
1375#endif 1409#endif
1376 .get_strings = myri10ge_get_strings, 1410 .get_strings = myri10ge_get_strings,
1377 .get_stats_count = myri10ge_get_stats_count, 1411 .get_stats_count = myri10ge_get_stats_count,
1378 .get_ethtool_stats = myri10ge_get_ethtool_stats 1412 .get_ethtool_stats = myri10ge_get_ethtool_stats,
1413 .set_msglevel = myri10ge_set_msglevel,
1414 .get_msglevel = myri10ge_get_msglevel
1379}; 1415};
1380 1416
1381static int myri10ge_allocate_rings(struct net_device *dev) 1417static int myri10ge_allocate_rings(struct net_device *dev)
@@ -1443,7 +1479,7 @@ static int myri10ge_allocate_rings(struct net_device *dev)
1443 /* Fill the receive rings */ 1479 /* Fill the receive rings */
1444 1480
1445 for (i = 0; i <= mgp->rx_small.mask; i++) { 1481 for (i = 0; i <= mgp->rx_small.mask; i++) {
1446 status = myri10ge_getbuf(&mgp->rx_small, mgp->pdev, 1482 status = myri10ge_getbuf(&mgp->rx_small, mgp,
1447 mgp->small_bytes, i); 1483 mgp->small_bytes, i);
1448 if (status) { 1484 if (status) {
1449 printk(KERN_ERR 1485 printk(KERN_ERR
@@ -1455,8 +1491,7 @@ static int myri10ge_allocate_rings(struct net_device *dev)
1455 1491
1456 for (i = 0; i <= mgp->rx_big.mask; i++) { 1492 for (i = 0; i <= mgp->rx_big.mask; i++) {
1457 status = 1493 status =
1458 myri10ge_getbuf(&mgp->rx_big, mgp->pdev, 1494 myri10ge_getbuf(&mgp->rx_big, mgp, dev->mtu + ETH_HLEN, i);
1459 dev->mtu + ETH_HLEN, i);
1460 if (status) { 1495 if (status) {
1461 printk(KERN_ERR 1496 printk(KERN_ERR
1462 "myri10ge: %s: alloced only %d big bufs\n", 1497 "myri10ge: %s: alloced only %d big bufs\n",
@@ -1652,9 +1687,11 @@ static int myri10ge_open(struct net_device *dev)
1652 } 1687 }
1653 1688
1654 if (mgp->mtrr >= 0) { 1689 if (mgp->mtrr >= 0) {
1655 mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + 0x200000; 1690 mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + MXGEFW_ETH_SEND_4;
1656 mgp->rx_small.wc_fifo = (u8 __iomem *) mgp->sram + 0x300000; 1691 mgp->rx_small.wc_fifo =
1657 mgp->rx_big.wc_fifo = (u8 __iomem *) mgp->sram + 0x340000; 1692 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_SMALL;
1693 mgp->rx_big.wc_fifo =
1694 (u8 __iomem *) mgp->sram + MXGEFW_ETH_RECV_BIG;
1658 } else { 1695 } else {
1659 mgp->tx.wc_fifo = NULL; 1696 mgp->tx.wc_fifo = NULL;
1660 mgp->rx_small.wc_fifo = NULL; 1697 mgp->rx_small.wc_fifo = NULL;
@@ -1690,7 +1727,21 @@ static int myri10ge_open(struct net_device *dev)
1690 1727
1691 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus); 1728 cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
1692 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus); 1729 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
1693 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0); 1730 cmd.data2 = sizeof(struct mcp_irq_data);
1731 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
1732 if (status == -ENOSYS) {
1733 dma_addr_t bus = mgp->fw_stats_bus;
1734 bus += offsetof(struct mcp_irq_data, send_done_count);
1735 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
1736 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
1737 status = myri10ge_send_cmd(mgp,
1738 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
1739 &cmd, 0);
1740 /* Firmware cannot support multicast without STATS_DMA_V2 */
1741 mgp->fw_multicast_support = 0;
1742 } else {
1743 mgp->fw_multicast_support = 1;
1744 }
1694 if (status) { 1745 if (status) {
1695 printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n", 1746 printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
1696 dev->name); 1747 dev->name);
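
This hunk makes the stats-DMA setup a two-step negotiation: the driver first offers MXGEFW_CMD_SET_STATS_DMA_V2 together with its own sizeof(struct mcp_irq_data) in data2, and if the firmware answers -ENOSYS it falls back to the obsolete command, pointing only at send_done_count and clearing fw_multicast_support. A stripped-down user-space model of that pattern (the stub send_cmd and the command ids here are placeholders, not the real MXGEFW ABI):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mcp_irq_data {
	uint32_t future_use[5];
	uint32_t dropped_multicast_filtered;
	uint32_t send_done_count;
	/* ... remaining legacy fields ... */
};

enum { CMD_SET_STATS_DMA_V2 = 1, CMD_SET_STATS_DMA_OBSOLETE = 2 };

/* Stub: behave like old firmware that only knows the obsolete command. */
static int send_cmd(int cmd, uint64_t addr, uint32_t size)
{
	(void)addr; (void)size;
	return cmd == CMD_SET_STATS_DMA_V2 ? -ENOSYS : 0;
}

int main(void)
{
	uint64_t stats_bus = 0x1000;	/* pretend DMA address */
	int multicast_support = 1;

	if (send_cmd(CMD_SET_STATS_DMA_V2, stats_bus,
		     sizeof(struct mcp_irq_data)) == -ENOSYS) {
		/* old firmware: hand it the send_done_count word only */
		send_cmd(CMD_SET_STATS_DMA_OBSOLETE,
			 stats_bus + offsetof(struct mcp_irq_data,
					      send_done_count), 0);
		multicast_support = 0;
	}
	printf("multicast support: %d\n", multicast_support);
	return 0;
}
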
@@ -1845,7 +1896,8 @@ myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
1845 if (cnt > 0) { 1896 if (cnt > 0) {
1846 /* pad it to 64 bytes. The src is 64 bytes bigger than it 1897 /* pad it to 64 bytes. The src is 64 bytes bigger than it
1847 * needs to be so that we don't overrun it */ 1898 * needs to be so that we don't overrun it */
1848 myri10ge_pio_copy(tx->wc_fifo + (cnt << 18), src, 64); 1899 myri10ge_pio_copy(tx->wc_fifo + MXGEFW_ETH_SEND_OFFSET(cnt),
1900 src, 64);
1849 mb(); 1901 mb();
1850 } 1902 }
1851} 1903}
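
The open-coded (cnt << 18) and the new MXGEFW_ETH_SEND_OFFSET(cnt) are the same value for the four send FIFOs, which sit 0x40000 bytes apart (see the address map added to myri10ge_mcp.h below). A quick standalone check of that equivalence:

#include <assert.h>

#define MXGEFW_ETH_SEND_4 0x200000
#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))
#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)

int main(void)
{
	int cnt;

	for (cnt = 0; cnt < 4; cnt++)
		assert(MXGEFW_ETH_SEND_OFFSET(cnt) == (cnt << 18));
	return 0;
}
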
@@ -2144,9 +2196,81 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
2144 2196
2145static void myri10ge_set_multicast_list(struct net_device *dev) 2197static void myri10ge_set_multicast_list(struct net_device *dev)
2146{ 2198{
2199 struct myri10ge_cmd cmd;
2200 struct myri10ge_priv *mgp;
2201 struct dev_mc_list *mc_list;
2202 int err;
2203
2204 mgp = netdev_priv(dev);
2147 /* can be called from atomic contexts, 2205 /* can be called from atomic contexts,
2148 * pass 1 to force atomicity in myri10ge_send_cmd() */ 2206 * pass 1 to force atomicity in myri10ge_send_cmd() */
2149 myri10ge_change_promisc(netdev_priv(dev), dev->flags & IFF_PROMISC, 1); 2207 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
2208
2209 /* This firmware is known to not support multicast */
2210 if (!mgp->fw_multicast_support)
2211 return;
2212
2213 /* Disable multicast filtering */
2214
2215 err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
2216 if (err != 0) {
2217 printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_ENABLE_ALLMULTI,"
2218 " error status: %d\n", dev->name, err);
2219 goto abort;
2220 }
2221
2222 if (dev->flags & IFF_ALLMULTI) {
2223 /* request to disable multicast filtering, so quit here */
2224 return;
2225 }
2226
2227 /* Flush the filters */
2228
2229 err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
2230 &cmd, 1);
2231 if (err != 0) {
2232 printk(KERN_ERR
2233 "myri10ge: %s: Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS"
2234 ", error status: %d\n", dev->name, err);
2235 goto abort;
2236 }
2237
2238 /* Walk the multicast list, and add each address */
2239 for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) {
2240 memcpy(&cmd.data0, &mc_list->dmi_addr, 4);
2241 memcpy(&cmd.data1, ((char *)&mc_list->dmi_addr) + 4, 2);
2242 cmd.data0 = htonl(cmd.data0);
2243 cmd.data1 = htonl(cmd.data1);
2244 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
2245 &cmd, 1);
2246
2247 if (err != 0) {
2248 printk(KERN_ERR "myri10ge: %s: Failed "
2249 "MXGEFW_JOIN_MULTICAST_GROUP, error status:"
2250 "%d\t", dev->name, err);
2251 printk(KERN_ERR "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2252 ((unsigned char *)&mc_list->dmi_addr)[0],
2253 ((unsigned char *)&mc_list->dmi_addr)[1],
2254 ((unsigned char *)&mc_list->dmi_addr)[2],
2255 ((unsigned char *)&mc_list->dmi_addr)[3],
2256 ((unsigned char *)&mc_list->dmi_addr)[4],
2257 ((unsigned char *)&mc_list->dmi_addr)[5]
2258 );
2259 goto abort;
2260 }
2261 }
2262 /* Enable multicast filtering */
2263 err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
2264 if (err != 0) {
2265 printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_DISABLE_ALLMULTI,"
2266 "error status: %d\n", dev->name, err);
2267 goto abort;
2268 }
2269
2270 return;
2271
2272abort:
2273 return;
2150} 2274}
2151 2275
2152static int myri10ge_set_mac_address(struct net_device *dev, void *addr) 2276static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
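
The join loop above packs each 6-byte multicast MAC into the two 32-bit command words: the first four bytes land in data0, the last two in data1, both byte-swapped to network order. A user-space sketch of exactly that packing (the helper name is made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirror of how the driver fills cmd.data0/cmd.data1. */
static void pack_mac(const uint8_t mac[6], uint32_t *data0, uint32_t *data1)
{
	uint32_t d0 = 0, d1 = 0;

	memcpy(&d0, mac, 4);		/* first 4 bytes */
	memcpy(&d1, mac + 4, 2);	/* last 2 bytes */
	*data0 = htonl(d0);
	*data1 = htonl(d1);
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t d0, d1;

	pack_mac(mac, &d0, &d1);
	printf("data0=%08x data1=%08x\n", d0, d1);
	return 0;
}
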
@@ -2293,6 +2417,8 @@ static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
2293 */ 2417 */
2294 2418
2295#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132 2419#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE 0x0132
2420#define PCI_DEVICE_ID_INTEL_E5000_PCIE23 0x25f7
2421#define PCI_DEVICE_ID_INTEL_E5000_PCIE47 0x25fa
2296 2422
2297static void myri10ge_select_firmware(struct myri10ge_priv *mgp) 2423static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
2298{ 2424{
@@ -2302,15 +2428,34 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
2302 mgp->fw_name = myri10ge_fw_unaligned; 2428 mgp->fw_name = myri10ge_fw_unaligned;
2303 2429
2304 if (myri10ge_force_firmware == 0) { 2430 if (myri10ge_force_firmware == 0) {
2431 int link_width, exp_cap;
2432 u16 lnk;
2433
2434 exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
2435 pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
2436 link_width = (lnk >> 4) & 0x3f;
2437
2305 myri10ge_enable_ecrc(mgp); 2438 myri10ge_enable_ecrc(mgp);
2306 2439
2307 /* Check to see if the upstream bridge is known to 2440 /* Check to see if the link width is less than 8 or if the
2308 * provide aligned completions */ 2441 * upstream bridge is known to provide aligned
2309 if (bridge 2442 * completions */
2310 /* ServerWorks HT2000/HT1000 */ 2443 if (link_width < 8) {
2311 && bridge->vendor == PCI_VENDOR_ID_SERVERWORKS 2444 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
2312 && bridge->device == 2445 link_width);
2313 PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE) { 2446 mgp->tx.boundary = 4096;
2447 mgp->fw_name = myri10ge_fw_aligned;
2448 } else if (bridge &&
2449 /* ServerWorks HT2000/HT1000 */
2450 ((bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
2451 && bridge->device ==
2452 PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE)
2453 /* All Intel E5000 PCIE ports */
2454 || (bridge->vendor == PCI_VENDOR_ID_INTEL
2455 && bridge->device >=
2456 PCI_DEVICE_ID_INTEL_E5000_PCIE23
2457 && bridge->device <=
2458 PCI_DEVICE_ID_INTEL_E5000_PCIE47))) {
2314 dev_info(&mgp->pdev->dev, 2459 dev_info(&mgp->pdev->dev,
2315 "Assuming aligned completions (0x%x:0x%x)\n", 2460 "Assuming aligned completions (0x%x:0x%x)\n",
2316 bridge->vendor, bridge->device); 2461 bridge->vendor, bridge->device);
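
The width check reads PCI_EXP_LNKSTA through the Express capability and pulls the negotiated link width out of bits 9:4; a link narrower than x8 is simply given the aligned firmware and a 4096-byte tx boundary. A self-contained sketch of the bit extraction (the sample register value is invented):

#include <stdint.h>
#include <stdio.h>

/* Negotiated link width lives in bits 9:4 of the Link Status register. */
static int lnksta_width(uint16_t lnk)
{
	return (lnk >> 4) & 0x3f;
}

int main(void)
{
	uint16_t lnk = 0x1041;	/* hypothetical LNKSTA value: x4 link */

	printf("PCIE x%d link -> %s\n", lnksta_width(lnk),
	       lnksta_width(lnk) < 8 ? "use aligned firmware"
				     : "bridge may still force aligned");
	return 0;
}
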
@@ -2429,7 +2574,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
2429 } 2574 }
2430 2575
2431 myri10ge_reset(mgp); 2576 myri10ge_reset(mgp);
2432 myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096); 2577 myri10ge_dummy_rdma(mgp, 1);
2433 2578
2434 /* Save configuration space to be restored if the 2579 /* Save configuration space to be restored if the
2435 * nic resets due to a parity error */ 2580 * nic resets due to a parity error */
@@ -2547,7 +2692,8 @@ static void myri10ge_watchdog_timer(unsigned long arg)
2547 2692
2548 mgp = (struct myri10ge_priv *)arg; 2693 mgp = (struct myri10ge_priv *)arg;
2549 if (mgp->tx.req != mgp->tx.done && 2694 if (mgp->tx.req != mgp->tx.done &&
2550 mgp->tx.done == mgp->watchdog_tx_done) 2695 mgp->tx.done == mgp->watchdog_tx_done &&
2696 mgp->watchdog_tx_req != mgp->watchdog_tx_done)
2551 /* nic seems like it might be stuck.. */ 2697 /* nic seems like it might be stuck.. */
2552 schedule_work(&mgp->watchdog_work); 2698 schedule_work(&mgp->watchdog_work);
2553 else 2699 else
@@ -2556,6 +2702,7 @@ static void myri10ge_watchdog_timer(unsigned long arg)
2556 jiffies + myri10ge_watchdog_timeout * HZ); 2702 jiffies + myri10ge_watchdog_timeout * HZ);
2557 2703
2558 mgp->watchdog_tx_done = mgp->tx.done; 2704 mgp->watchdog_tx_done = mgp->tx.done;
2705 mgp->watchdog_tx_req = mgp->tx.req;
2559} 2706}
2560 2707
2561static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 2708static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -2583,6 +2730,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2583 mgp->csum_flag = MXGEFW_FLAGS_CKSUM; 2730 mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
2584 mgp->pause = myri10ge_flow_control; 2731 mgp->pause = myri10ge_flow_control;
2585 mgp->intr_coal_delay = myri10ge_intr_coal_delay; 2732 mgp->intr_coal_delay = myri10ge_intr_coal_delay;
2733 mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
2586 init_waitqueue_head(&mgp->down_wq); 2734 init_waitqueue_head(&mgp->down_wq);
2587 2735
2588 if (pci_enable_device(pdev)) { 2736 if (pci_enable_device(pdev)) {
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
index 0a6cae6cb186..9519ae7cd5ec 100644
--- a/drivers/net/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -91,7 +91,19 @@ struct mcp_kreq_ether_recv {
91 91
92/* Commands */ 92/* Commands */
93 93
94#define MXGEFW_CMD_OFFSET 0xf80000 94#define MXGEFW_BOOT_HANDOFF 0xfc0000
95#define MXGEFW_BOOT_DUMMY_RDMA 0xfc01c0
96
97#define MXGEFW_ETH_CMD 0xf80000
98#define MXGEFW_ETH_SEND_4 0x200000
99#define MXGEFW_ETH_SEND_1 0x240000
100#define MXGEFW_ETH_SEND_2 0x280000
101#define MXGEFW_ETH_SEND_3 0x2c0000
102#define MXGEFW_ETH_RECV_SMALL 0x300000
103#define MXGEFW_ETH_RECV_BIG 0x340000
104
105#define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000))
106#define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4)
95 107
96enum myri10ge_mcp_cmd_type { 108enum myri10ge_mcp_cmd_type {
97 MXGEFW_CMD_NONE = 0, 109 MXGEFW_CMD_NONE = 0,
@@ -154,7 +166,7 @@ enum myri10ge_mcp_cmd_type {
154 MXGEFW_CMD_SET_MTU, 166 MXGEFW_CMD_SET_MTU,
155 MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, /* in microseconds */ 167 MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, /* in microseconds */
156 MXGEFW_CMD_SET_STATS_INTERVAL, /* in microseconds */ 168 MXGEFW_CMD_SET_STATS_INTERVAL, /* in microseconds */
157 MXGEFW_CMD_SET_STATS_DMA, 169 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, /* replaced by SET_STATS_DMA_V2 */
158 170
159 MXGEFW_ENABLE_PROMISC, 171 MXGEFW_ENABLE_PROMISC,
160 MXGEFW_DISABLE_PROMISC, 172 MXGEFW_DISABLE_PROMISC,
@@ -168,7 +180,26 @@ enum myri10ge_mcp_cmd_type {
168 * data2 = RDMA length (MSH), WDMA length (LSH) 180 * data2 = RDMA length (MSH), WDMA length (LSH)
169 * command return data = repetitions (MSH), 0.5-ms ticks (LSH) 181 * command return data = repetitions (MSH), 0.5-ms ticks (LSH)
170 */ 182 */
171 MXGEFW_DMA_TEST 183 MXGEFW_DMA_TEST,
184
185 MXGEFW_ENABLE_ALLMULTI,
186 MXGEFW_DISABLE_ALLMULTI,
187
188 /* returns MXGEFW_CMD_ERROR_MULTICAST
189 * if there is no room in the cache
190 * data0,MSH(data1) = multicast group address */
191 MXGEFW_JOIN_MULTICAST_GROUP,
192 /* returns MXGEFW_CMD_ERROR_MULTICAST
193 * if the address is not in the cache,
194 * or is equal to FF-FF-FF-FF-FF-FF
195 * data0,MSH(data1) = multicast group address */
196 MXGEFW_LEAVE_MULTICAST_GROUP,
197 MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
198
199 MXGEFW_CMD_SET_STATS_DMA_V2,
200 /* data0, data1 = bus addr,
201 * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows
202 * adding new stuff to mcp_irq_data without changing the ABI */
172}; 203};
173 204
174enum myri10ge_mcp_cmd_status { 205enum myri10ge_mcp_cmd_status {
@@ -180,11 +211,17 @@ enum myri10ge_mcp_cmd_status {
180 MXGEFW_CMD_ERROR_CLOSED, 211 MXGEFW_CMD_ERROR_CLOSED,
181 MXGEFW_CMD_ERROR_HASH_ERROR, 212 MXGEFW_CMD_ERROR_HASH_ERROR,
182 MXGEFW_CMD_ERROR_BAD_PORT, 213 MXGEFW_CMD_ERROR_BAD_PORT,
183 MXGEFW_CMD_ERROR_RESOURCES 214 MXGEFW_CMD_ERROR_RESOURCES,
215 MXGEFW_CMD_ERROR_MULTICAST
184}; 216};
185 217
186/* 40 Bytes */ 218#define MXGEFW_OLD_IRQ_DATA_LEN 40
219
187struct mcp_irq_data { 220struct mcp_irq_data {
221 /* add new counters at the beginning */
222 u32 future_use[5];
223 u32 dropped_multicast_filtered;
224 /* 40 Bytes */
188 u32 send_done_count; 225 u32 send_done_count;
189 226
190 u32 link_up; 227 u32 link_up;
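
struct mcp_irq_data now grows from the front: new counters such as dropped_multicast_filtered are prepended, and since the driver reports its own sizeof() in data2 of SET_STATS_DMA_V2, the firmware can avoid DMA-ing fields an older driver does not know about, while the legacy 40-byte tail keeps its layout. A small offsetof() check of that invariant (the tail past link_up is abbreviated to a byte array here):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define MXGEFW_OLD_IRQ_DATA_LEN 40

struct mcp_irq_data {
	/* add new counters at the beginning */
	uint32_t future_use[5];
	uint32_t dropped_multicast_filtered;
	/* 40 Bytes */
	uint32_t send_done_count;
	uint32_t link_up;
	uint8_t rest_of_legacy_block[32];	/* stand-in for the real fields */
};

int main(void)
{
	/* the legacy block must still be exactly 40 bytes, at the end */
	assert(sizeof(struct mcp_irq_data) -
	       offsetof(struct mcp_irq_data, send_done_count) ==
	       MXGEFW_OLD_IRQ_DATA_LEN);
	return 0;
}
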
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index db0475a1102f..2a467778efc7 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -54,8 +54,8 @@
54#include <asm/uaccess.h> 54#include <asm/uaccess.h>
55 55
56#define DRV_NAME "natsemi" 56#define DRV_NAME "natsemi"
57#define DRV_VERSION "2.0" 57#define DRV_VERSION "2.1"
58#define DRV_RELDATE "June 27, 2006" 58#define DRV_RELDATE "Sept 11, 2006"
59 59
60#define RX_OFFSET 2 60#define RX_OFFSET 2
61 61
@@ -2387,9 +2387,6 @@ static void __set_rx_mode(struct net_device *dev)
2387 u32 rx_mode; 2387 u32 rx_mode;
2388 2388
2389 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 2389 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2390 /* Unconditionally log net taps. */
2391 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
2392 dev->name);
2393 rx_mode = RxFilterEnable | AcceptBroadcast 2390 rx_mode = RxFilterEnable | AcceptBroadcast
2394 | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys; 2391 | AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
2395 } else if ((dev->mc_count > multicast_filter_limit) 2392 } else if ((dev->mc_count > multicast_filter_limit)
@@ -3246,7 +3243,7 @@ static int __init natsemi_init_mod (void)
3246 printk(version); 3243 printk(version);
3247#endif 3244#endif
3248 3245
3249 return pci_module_init (&natsemi_driver); 3246 return pci_register_driver(&natsemi_driver);
3250} 3247}
3251 3248
3252static void __exit natsemi_exit_mod (void) 3249static void __exit natsemi_exit_mod (void)
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 34bdba9eec79..654b477b570a 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -702,7 +702,7 @@ static int __init ne2k_pci_init(void)
702#ifdef MODULE 702#ifdef MODULE
703 printk(version); 703 printk(version);
704#endif 704#endif
705 return pci_module_init (&ne2k_driver); 705 return pci_register_driver(&ne2k_driver);
706} 706}
707 707
708 708
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b1311ae82675..30ed9a5a40e0 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -17,7 +17,6 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#include <linux/config.h>
21#include <linux/init.h> 20#include <linux/init.h>
22#include <linux/module.h> 21#include <linux/module.h>
23#include <linux/kernel.h> 22#include <linux/kernel.h>
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index fa854c8fde75..4d52ecf8af56 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required");
1323MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); 1323MODULE_PARM_DESC(memstart, "NI5210 memory base address,required");
1324MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); 1324MODULE_PARM_DESC(memend, "NI5210 memory end address,required");
1325 1325
1326int init_module(void) 1326int __init init_module(void)
1327{ 1327{
1328 if(io <= 0x0 || !memend || !memstart || irq < 2) { 1328 if(io <= 0x0 || !memend || !memstart || irq < 2) {
1329 printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); 1329 printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index bb42ff218484..810cc572f5f7 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
1253MODULE_PARM_DESC(io, "ni6510 I/O base address"); 1253MODULE_PARM_DESC(io, "ni6510 I/O base address");
1254MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); 1254MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
1255 1255
1256int init_module(void) 1256int __init init_module(void)
1257{ 1257{
1258 dev_ni65 = ni65_probe(-1); 1258 dev_ni65 = ni65_probe(-1);
1259 return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; 1259 return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 0e76859c90a2..0dedd34804c3 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -2178,7 +2178,7 @@ static struct pci_driver driver = {
2178static int __init ns83820_init(void) 2178static int __init ns83820_init(void)
2179{ 2179{
2180 printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n"); 2180 printk(KERN_INFO "ns83820.c: National Semiconductor DP83820 10/100/1000 driver.\n");
2181 return pci_module_init(&driver); 2181 return pci_register_driver(&driver);
2182} 2182}
2183 2183
2184static void __exit ns83820_exit(void) 2184static void __exit ns83820_exit(void)
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index e0e293964042..dea843a62d32 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -98,7 +98,7 @@ IVc. Errata
98#include <linux/crc32.h> 98#include <linux/crc32.h>
99#include <asm/io.h> 99#include <asm/io.h>
100 100
101#define NETDRV_VERSION "1.0.0" 101#define NETDRV_VERSION "1.0.1"
102#define MODNAME "netdrv" 102#define MODNAME "netdrv"
103#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded" 103#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
104#define PFX MODNAME ": " 104#define PFX MODNAME ": "
@@ -1853,9 +1853,6 @@ static void netdrv_set_rx_mode (struct net_device *dev)
1853 1853
1854 /* Note: do not reorder, GCC is clever about common statements. */ 1854 /* Note: do not reorder, GCC is clever about common statements. */
1855 if (dev->flags & IFF_PROMISC) { 1855 if (dev->flags & IFF_PROMISC) {
1856 /* Unconditionally log net taps. */
1857 printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1858 dev->name);
1859 rx_mode = 1856 rx_mode =
1860 AcceptBroadcast | AcceptMulticast | AcceptMyPhys | 1857 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
1861 AcceptAllPhys; 1858 AcceptAllPhys;
@@ -1963,7 +1960,7 @@ static int __init netdrv_init_module (void)
1963#ifdef MODULE 1960#ifdef MODULE
1964 printk(version); 1961 printk(version);
1965#endif 1962#endif
1966 return pci_module_init (&netdrv_pci_driver); 1963 return pci_register_driver(&netdrv_pci_driver);
1967} 1964}
1968 1965
1969 1966
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 297e9f805366..c54f6a7ebf31 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -771,6 +771,7 @@ static struct pcmcia_device_id axnet_ids[] = {
771 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309), 771 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
772 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106), 772 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1106),
773 PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab), 773 PCMCIA_DEVICE_MANF_CARD(0x8a01, 0xc1ab),
774 PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202),
774 PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc), 775 PCMCIA_DEVICE_PROD_ID12("AmbiCom,Inc.", "Fast Ethernet PC Card(AMB8110)", 0x49b020a7, 0x119cc9fc),
775 PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef), 776 PCMCIA_DEVICE_PROD_ID124("Fast Ethernet", "16-bit PC Card", "AX88190", 0xb4be14e3, 0x9a12eb6a, 0xab9be5ef),
776 PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef), 777 PCMCIA_DEVICE_PROD_ID12("ASIX", "AX88190", 0x0959823b, 0xab9be5ef),
@@ -786,8 +787,6 @@ static struct pcmcia_device_id axnet_ids[] = {
786 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116), 787 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
787 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058), 788 PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
788 PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef), 789 PCMCIA_DEVICE_PROD_ID14("Network Everywhere", "AX88190", 0x820a67b6, 0xab9be5ef),
789 /* this is not specific enough */
790 /* PCMCIA_DEVICE_MANF_CARD(0x021b, 0x0202), */
791 PCMCIA_DEVICE_NULL, 790 PCMCIA_DEVICE_NULL,
792}; 791};
793MODULE_DEVICE_TABLE(pcmcia, axnet_ids); 792MODULE_DEVICE_TABLE(pcmcia, axnet_ids);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index ea93b8f18605..74211af0e0c9 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -29,7 +29,7 @@
29======================================================================*/ 29======================================================================*/
30 30
31#define DRV_NAME "fmvj18x_cs" 31#define DRV_NAME "fmvj18x_cs"
32#define DRV_VERSION "2.8" 32#define DRV_VERSION "2.9"
33 33
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/kernel.h> 35#include <linux/kernel.h>
@@ -1193,8 +1193,6 @@ static void set_rx_mode(struct net_device *dev)
1193 outb(CONFIG0_RST_1, ioaddr + CONFIG_0); 1193 outb(CONFIG0_RST_1, ioaddr + CONFIG_0);
1194 1194
1195 if (dev->flags & IFF_PROMISC) { 1195 if (dev->flags & IFF_PROMISC) {
1196 /* Unconditionally log net taps. */
1197 printk("%s: Promiscuous mode enabled.\n", dev->name);
1198 memset(mc_filter, 0xff, sizeof(mc_filter)); 1196 memset(mc_filter, 0xff, sizeof(mc_filter));
1199 outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */ 1197 outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
1200 } else if (dev->mc_count > MC_FILTERBREAK 1198 } else if (dev->mc_count > MC_FILTERBREAK
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 0ecebfc31f07..cc0dcc9bf636 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -654,11 +654,8 @@ static int pcnet_config(struct pcmcia_device *link)
654 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 654 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
655 655
656 if (info->flags & (IS_DL10019|IS_DL10022)) { 656 if (info->flags & (IS_DL10019|IS_DL10022)) {
657 u_char id = inb(dev->base_addr + 0x1a);
658 dev->do_ioctl = &ei_ioctl; 657 dev->do_ioctl = &ei_ioctl;
659 mii_phy_probe(dev); 658 mii_phy_probe(dev);
660 if ((id == 0x30) && !info->pna_phy && (info->eth_phy == 4))
661 info->eth_phy = 0;
662 } 659 }
663 660
664 link->dev_node = &info->node; 661 link->dev_node = &info->node;
@@ -821,15 +818,6 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
821 } 818 }
822} 819}
823 820
824static void mdio_reset(kio_addr_t addr, int phy_id)
825{
826 outb_p(0x08, addr);
827 outb_p(0x0c, addr);
828 outb_p(0x08, addr);
829 outb_p(0x0c, addr);
830 outb_p(0x00, addr);
831}
832
833/*====================================================================== 821/*======================================================================
834 822
835 EEPROM access routines for DL10019 and DL10022 based cards 823 EEPROM access routines for DL10019 and DL10022 based cards
@@ -942,7 +930,8 @@ static void set_misc_reg(struct net_device *dev)
942 } 930 }
943 if (info->flags & IS_DL10022) { 931 if (info->flags & IS_DL10022) {
944 if (info->flags & HAS_MII) { 932 if (info->flags & HAS_MII) {
945 mdio_reset(nic_base + DLINK_GPIO, info->eth_phy); 933 /* Advertise 100F, 100H, 10F, 10H */
934 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 4, 0x01e1);
946 /* Restart MII autonegotiation */ 935 /* Restart MII autonegotiation */
947 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000); 936 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x0000);
948 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200); 937 mdio_write(nic_base + DLINK_GPIO, info->eth_phy, 0, 0x1200);
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index a73d54553030..3fb369f2e7ed 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -80,14 +80,14 @@ INT_MODULE_PARM(if_port, 0);
80#ifdef PCMCIA_DEBUG 80#ifdef PCMCIA_DEBUG
81INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG); 81INT_MODULE_PARM(pc_debug, PCMCIA_DEBUG);
82static const char *version = 82static const char *version =
83"smc91c92_cs.c 0.09 1996/8/4 Donald Becker, becker@scyld.com.\n"; 83"smc91c92_cs.c 1.123 2006/11/09 Donald Becker, becker@scyld.com.\n";
84#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args) 84#define DEBUG(n, args...) if (pc_debug>(n)) printk(KERN_DEBUG args)
85#else 85#else
86#define DEBUG(n, args...) 86#define DEBUG(n, args...)
87#endif 87#endif
88 88
89#define DRV_NAME "smc91c92_cs" 89#define DRV_NAME "smc91c92_cs"
90#define DRV_VERSION "1.122" 90#define DRV_VERSION "1.123"
91 91
92/*====================================================================*/ 92/*====================================================================*/
93 93
@@ -1780,7 +1780,6 @@ static void set_rx_mode(struct net_device *dev)
1780 u_short rx_cfg_setting; 1780 u_short rx_cfg_setting;
1781 1781
1782 if (dev->flags & IFF_PROMISC) { 1782 if (dev->flags & IFF_PROMISC) {
1783 printk(KERN_NOTICE "%s: setting Rx mode to promiscuous.\n", dev->name);
1784 rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti; 1783 rx_cfg_setting = RxStripCRC | RxEnable | RxPromisc | RxAllMulti;
1785 } else if (dev->flags & IFF_ALLMULTI) 1784 } else if (dev->flags & IFF_ALLMULTI)
1786 rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; 1785 rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 9bae77ce1314..4122bb46f5ff 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -345,6 +345,7 @@ typedef struct local_info_t {
345 void __iomem *dingo_ccr; /* only used for CEM56 cards */ 345 void __iomem *dingo_ccr; /* only used for CEM56 cards */
346 unsigned last_ptr_value; /* last packets transmitted value */ 346 unsigned last_ptr_value; /* last packets transmitted value */
347 const char *manf_str; 347 const char *manf_str;
348 struct work_struct tx_timeout_task;
348} local_info_t; 349} local_info_t;
349 350
350/**************** 351/****************
@@ -352,6 +353,7 @@ typedef struct local_info_t {
352 */ 353 */
353static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); 354static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
354static void do_tx_timeout(struct net_device *dev); 355static void do_tx_timeout(struct net_device *dev);
356static void xirc2ps_tx_timeout_task(void *data);
355static struct net_device_stats *do_get_stats(struct net_device *dev); 357static struct net_device_stats *do_get_stats(struct net_device *dev);
356static void set_addresses(struct net_device *dev); 358static void set_addresses(struct net_device *dev);
357static void set_multicast_list(struct net_device *dev); 359static void set_multicast_list(struct net_device *dev);
@@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link)
589#ifdef HAVE_TX_TIMEOUT 591#ifdef HAVE_TX_TIMEOUT
590 dev->tx_timeout = do_tx_timeout; 592 dev->tx_timeout = do_tx_timeout;
591 dev->watchdog_timeo = TX_TIMEOUT; 593 dev->watchdog_timeo = TX_TIMEOUT;
594 INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
592#endif 595#endif
593 596
594 return xirc2ps_config(link); 597 return xirc2ps_config(link);
@@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1341/*====================================================================*/ 1344/*====================================================================*/
1342 1345
1343static void 1346static void
1344do_tx_timeout(struct net_device *dev) 1347xirc2ps_tx_timeout_task(void *data)
1345{ 1348{
1346 local_info_t *lp = netdev_priv(dev); 1349 struct net_device *dev = data;
1347 printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
1348 lp->stats.tx_errors++;
1349 /* reset the card */ 1350 /* reset the card */
1350 do_reset(dev,1); 1351 do_reset(dev,1);
1351 dev->trans_start = jiffies; 1352 dev->trans_start = jiffies;
1352 netif_wake_queue(dev); 1353 netif_wake_queue(dev);
1353} 1354}
1354 1355
1356static void
1357do_tx_timeout(struct net_device *dev)
1358{
1359 local_info_t *lp = netdev_priv(dev);
1360 lp->stats.tx_errors++;
1361 printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
1362 schedule_work(&lp->tx_timeout_task);
1363}
1364
1355static int 1365static int
1356do_start_xmit(struct sk_buff *skb, struct net_device *dev) 1366do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1357{ 1367{
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 4daafe303358..5e26fe806e21 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -202,6 +202,8 @@ static int homepna[MAX_UNITS];
202#define CSR15 15 202#define CSR15 15
203#define PCNET32_MC_FILTER 8 203#define PCNET32_MC_FILTER 8
204 204
205#define PCNET32_79C970A 0x2621
206
205/* The PCNET32 Rx and Tx ring descriptors. */ 207/* The PCNET32 Rx and Tx ring descriptors. */
206struct pcnet32_rx_head { 208struct pcnet32_rx_head {
207 u32 base; 209 u32 base;
@@ -289,6 +291,7 @@ struct pcnet32_private {
289 291
290 /* each bit indicates an available PHY */ 292 /* each bit indicates an available PHY */
291 u32 phymask; 293 u32 phymask;
294 unsigned short chip_version; /* which variant this is */
292}; 295};
293 296
294static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); 297static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
@@ -724,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev)
724 spin_lock_irqsave(&lp->lock, flags); 727 spin_lock_irqsave(&lp->lock, flags);
725 if (lp->mii) { 728 if (lp->mii) {
726 r = mii_link_ok(&lp->mii_if); 729 r = mii_link_ok(&lp->mii_if);
727 } else { 730 } else if (lp->chip_version >= PCNET32_79C970A) {
728 ulong ioaddr = dev->base_addr; /* card base I/O address */ 731 ulong ioaddr = dev->base_addr; /* card base I/O address */
729 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); 732 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
733 } else { /* can not detect link on really old chips */
734 r = 1;
730 } 735 }
731 spin_unlock_irqrestore(&lp->lock, flags); 736 spin_unlock_irqrestore(&lp->lock, flags);
732 737
@@ -1091,6 +1096,10 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
1091 ulong ioaddr = dev->base_addr; 1096 ulong ioaddr = dev->base_addr;
1092 int ticks; 1097 int ticks;
1093 1098
1099 /* really old chips have to be stopped. */
1100 if (lp->chip_version < PCNET32_79C970A)
1101 return 0;
1102
1094 /* set SUSPEND (SPND) - CSR5 bit 0 */ 1103 /* set SUSPEND (SPND) - CSR5 bit 0 */
1095 csr5 = a->read_csr(ioaddr, CSR5); 1104 csr5 = a->read_csr(ioaddr, CSR5);
1096 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); 1105 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
@@ -1529,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1529 lp->mii_if.reg_num_mask = 0x1f; 1538 lp->mii_if.reg_num_mask = 0x1f;
1530 lp->dxsuflo = dxsuflo; 1539 lp->dxsuflo = dxsuflo;
1531 lp->mii = mii; 1540 lp->mii = mii;
1541 lp->chip_version = chip_version;
1532 lp->msg_enable = pcnet32_debug; 1542 lp->msg_enable = pcnet32_debug;
1533 if ((cards_found >= MAX_UNITS) 1543 if ((cards_found >= MAX_UNITS)
1534 || (options[cards_found] > sizeof(options_mapping))) 1544 || (options[cards_found] > sizeof(options_mapping)))
@@ -1839,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev)
1839 val |= 2; 1849 val |= 2;
1840 } else if (lp->options & PCNET32_PORT_ASEL) { 1850 } else if (lp->options & PCNET32_PORT_ASEL) {
1841 /* workaround of xSeries250, turn on for 79C975 only */ 1851 /* workaround of xSeries250, turn on for 79C975 only */
1842 i = ((lp->a.read_csr(ioaddr, 88) | 1852 if (lp->chip_version == 0x2627)
1843 (lp->a.
1844 read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
1845 if (i == 0x2627)
1846 val |= 3; 1853 val |= 3;
1847 } 1854 }
1848 lp->a.write_bcr(ioaddr, 9, val); 1855 lp->a.write_bcr(ioaddr, 9, val);
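
The deleted open-coded read is the same CSR88/CSR89 probe that pcnet32_probe1 already performs to fill lp->chip_version: CSR89 holds the high half, CSR88 the low, and bits 27:12 of the combined word are the part number (0x2621 for the 79C970A, 0x2627 for the 79C975 targeted by the xSeries 250 workaround). A standalone check of the decode, with invented register contents:

#include <assert.h>
#include <stdint.h>

static unsigned short chip_version(uint16_t csr88, uint16_t csr89)
{
	uint32_t id = ((uint32_t)csr89 << 16) | csr88;

	return (id >> 12) & 0xffff;	/* part-number field */
}

int main(void)
{
	/* hypothetical register values encoding part number 0x2627 */
	assert(chip_version(0x7003, 0x0262) == 0x2627);
	return 0;
}
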
@@ -1986,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev)
1986 1993
1987 netif_start_queue(dev); 1994 netif_start_queue(dev);
1988 1995
1989 /* Print the link status and start the watchdog */ 1996 if (lp->chip_version >= PCNET32_79C970A) {
1990 pcnet32_check_media(dev, 1); 1997 /* Print the link status and start the watchdog */
1991 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); 1998 pcnet32_check_media(dev, 1);
1999 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
2000 }
1992 2001
1993 i = 0; 2002 i = 0;
1994 while (i++ < 100) 2003 while (i++ < 100)
@@ -2969,7 +2978,7 @@ static int __init pcnet32_init_module(void)
2969 tx_start = tx_start_pt; 2978 tx_start = tx_start_pt;
2970 2979
2971 /* find the PCI devices */ 2980 /* find the PCI devices */
2972 if (!pci_module_init(&pcnet32_driver)) 2981 if (!pci_register_driver(&pcnet32_driver))
2973 pcnet32_have_pci = 1; 2982 pcnet32_have_pci = 1;
2974 2983
2975 /* should we find any remaining VLbus devices ? */ 2984 /* should we find any remaining VLbus devices ? */
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2ba6d3a40e2e..b79ec0d7480f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -56,5 +56,22 @@ config SMSC_PHY
56 ---help--- 56 ---help---
57 Currently supports the LAN83C185 PHY 57 Currently supports the LAN83C185 PHY
58 58
59config FIXED_PHY
60 tristate "Drivers for PHY emulation on fixed speed/link"
61 depends on PHYLIB
62 ---help---
63 Adds the driver to PHY layer to cover the boards that do not have any PHY bound,
64 but with the ability to manipulate with speed/link in software. The relavant MII
65 speed/duplex parameters could be effectively handled in user-specified fuction.
66 Currently tested with mpc866ads.
67
68config FIXED_MII_10_FDX
69 bool "Emulation for 10M Fdx fixed PHY behavior"
70 depends on FIXED_PHY
71
72config FIXED_MII_100_FDX
73 bool "Emulation for 100M Fdx fixed PHY behavior"
74 depends on FIXED_PHY
75
59endmenu 76endmenu
60 77
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a00e61942525..320f8323123f 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
10obj-$(CONFIG_QSEMI_PHY) += qsemi.o 10obj-$(CONFIG_QSEMI_PHY) += qsemi.o
11obj-$(CONFIG_SMSC_PHY) += smsc.o 11obj-$(CONFIG_SMSC_PHY) += smsc.o
12obj-$(CONFIG_VITESSE_PHY) += vitesse.o 12obj-$(CONFIG_VITESSE_PHY) += vitesse.o
13obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
new file mode 100644
index 000000000000..341036df4710
--- /dev/null
+++ b/drivers/net/phy/fixed.c
@@ -0,0 +1,358 @@
1/*
2 * drivers/net/phy/fixed.c
3 *
 4 * Driver for fixed PHYs, where the transceiver operates in one fixed mode.
5 *
6 * Author: Vitaly Bordug
7 *
8 * Copyright (c) 2006 MontaVista Software, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/mii.h>
33#include <linux/ethtool.h>
34#include <linux/phy.h>
35
36#include <asm/io.h>
37#include <asm/irq.h>
38#include <asm/uaccess.h>
39
40#define MII_REGS_NUM 7
41
42/*
 43 The idea is to emulate normal PHY behavior by responding with
 44 pre-defined values to MII BMCR/BMSR reads, so that the read_status hook
 45 can pick up all the needed info.
46*/
47
48struct fixed_phy_status {
49 u8 link;
50 u16 speed;
51 u8 duplex;
52};
53
54/*-----------------------------------------------------------------------------
 55 * Private information holder for mii_bus
56 *-----------------------------------------------------------------------------*/
57struct fixed_info {
58 u16 *regs;
59 u8 regs_num;
60 struct fixed_phy_status phy_status;
61 struct phy_device *phydev; /* pointer to the container */
62 /* link & speed cb */
63 int(*link_update)(struct net_device*, struct fixed_phy_status*);
64
65};
66
67/*-----------------------------------------------------------------------------
 68 * If something unusual needs to be done with link/speed,
 69 * the network driver is able to assign a function to implement it.
 70 * May be useful for PHYs that need to be software-driven.
71 *-----------------------------------------------------------------------------*/
72int fixed_mdio_set_link_update(struct phy_device* phydev,
73 int(*link_update)(struct net_device*, struct fixed_phy_status*))
74{
75 struct fixed_info *fixed;
76
77 if(link_update == NULL)
78 return -EINVAL;
79
80 if(phydev) {
81 if(phydev->bus) {
82 fixed = phydev->bus->priv;
83 fixed->link_update = link_update;
84 return 0;
85 }
86 }
87 return -EINVAL;
88}
89EXPORT_SYMBOL(fixed_mdio_set_link_update);
90
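
A MAC driver that steers the link entirely in software would hook in through this setter. A user-space model of the callback flow (structures trimmed to the fields used; a real driver would poll a GPIO or similar rather than hardcode link up):

#include <stdio.h>

struct net_device { const char *name; };
struct fixed_phy_status { unsigned char link; unsigned short speed; unsigned char duplex; };

/* models the slot that fixed_mdio_set_link_update() fills in */
static int (*link_update)(struct net_device *, struct fixed_phy_status *);

static int my_link_update(struct net_device *dev, struct fixed_phy_status *st)
{
	(void)dev;
	st->link = 1;		/* a real driver would read board state */
	st->speed = 100;
	st->duplex = 1;
	return 0;
}

int main(void)
{
	struct net_device dev = { "eth0" };
	struct fixed_phy_status st = { 0, 0, 0 };

	link_update = my_link_update;	/* fixed_mdio_set_link_update() */
	link_update(&dev, &st);		/* what fixed_mii_read() triggers */
	printf("%s: link=%d %uM %s\n", dev.name, st.link, st.speed,
	       st.duplex ? "fdx" : "hdx");
	return 0;
}
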
91/*-----------------------------------------------------------------------------
92 * This is used for updating internal mii regs from the status
93 *-----------------------------------------------------------------------------*/
94static int fixed_mdio_update_regs(struct fixed_info *fixed)
95{
96 u16 *regs = fixed->regs;
97 u16 bmsr = 0;
98 u16 bmcr = 0;
99
100 if(!regs) {
 101 printk(KERN_ERR "%s: regs not set up\n", __FUNCTION__);
102 return -EINVAL;
103 }
104
105 if(fixed->phy_status.link)
106 bmsr |= BMSR_LSTATUS;
107
108 if(fixed->phy_status.duplex) {
109 bmcr |= BMCR_FULLDPLX;
110
111 switch ( fixed->phy_status.speed ) {
112 case 100:
113 bmsr |= BMSR_100FULL;
114 bmcr |= BMCR_SPEED100;
115 break;
116
117 case 10:
118 bmsr |= BMSR_10FULL;
119 break;
120 }
121 } else {
122 switch ( fixed->phy_status.speed ) {
123 case 100:
124 bmsr |= BMSR_100HALF;
125 bmcr |= BMCR_SPEED100;
126 break;
127
128 case 10:
129 bmsr |= BMSR_100HALF;
130 break;
131 }
132 }
133
134 regs[MII_BMCR] = bmcr;
 135 regs[MII_BMSR] = bmsr | BMSR_10HALF; /* we are always capable of 10 hdx */
136
137 return 0;
138}
139
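
With the 10-half bit fixed above, the speed/duplex-to-register mapping is a four-row table. A user-space rendering of the same logic, handy for sanity-checking all combinations (bit values are the standard MII definitions):

#include <stdint.h>
#include <stdio.h>

#define BMCR_SPEED100	0x2000
#define BMCR_FULLDPLX	0x0100
#define BMSR_LSTATUS	0x0004
#define BMSR_10HALF	0x0800
#define BMSR_10FULL	0x1000
#define BMSR_100HALF	0x2000
#define BMSR_100FULL	0x4000

static void emulate(int speed, int duplex, uint16_t *bmcr, uint16_t *bmsr)
{
	*bmcr = (speed == 100 ? BMCR_SPEED100 : 0) |
		(duplex ? BMCR_FULLDPLX : 0);
	if (speed == 100)
		*bmsr = duplex ? BMSR_100FULL : BMSR_100HALF;
	else
		*bmsr = duplex ? BMSR_10FULL : BMSR_10HALF;
	/* link up, and 10-half always advertised, as in the driver */
	*bmsr |= BMSR_LSTATUS | BMSR_10HALF;
}

int main(void)
{
	static const int speeds[2] = { 10, 100 };
	uint16_t bmcr, bmsr;
	int s, d;

	for (s = 0; s < 2; s++)
		for (d = 0; d < 2; d++) {
			emulate(speeds[s], d, &bmcr, &bmsr);
			printf("%3dM %s: BMCR=%04x BMSR=%04x\n",
			       speeds[s], d ? "fdx" : "hdx", bmcr, bmsr);
		}
	return 0;
}
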
140static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location)
141{
142 struct fixed_info *fixed = bus->priv;
143
144 /* if user has registered link update callback, use it */
145 if(fixed->phydev)
146 if(fixed->phydev->attached_dev) {
147 if(fixed->link_update) {
148 fixed->link_update(fixed->phydev->attached_dev,
149 &fixed->phy_status);
150 fixed_mdio_update_regs(fixed);
151 }
152 }
153
154 if ((unsigned int)location >= fixed->regs_num)
155 return -1;
156 return fixed->regs[location];
157}
158
159static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
160{
161 /* do nothing for now*/
162 return 0;
163}
164
165static int fixed_mii_reset(struct mii_bus *bus)
166{
167 /*nothing here - no way/need to reset it*/
168 return 0;
169}
170
171static int fixed_config_aneg(struct phy_device *phydev)
172{
173 /* :TODO:03/13/2006 09:45:37 PM::
 174 The full autoneg functionality can be emulated,
 175 but there is no need to have anything here for now
176 */
177 return 0;
178}
179
180/*-----------------------------------------------------------------------------
181 * the manual bind will do the magic - with phy_id_mask == 0
182 * match will never return true...
183 *-----------------------------------------------------------------------------*/
184static struct phy_driver fixed_mdio_driver = {
185 .name = "Fixed PHY",
186 .features = PHY_BASIC_FEATURES,
187 .config_aneg = fixed_config_aneg,
188 .read_status = genphy_read_status,
189 .driver = { .owner = THIS_MODULE,},
190};
191
192/*-----------------------------------------------------------------------------
193 * This func is used to create all the necessary stuff, bind
 194 * the fixed phy driver and register it all on the mdio_bus_type.
195 * speed is either 10 or 100, duplex is boolean.
196 * number is used to create multiple fixed PHYs, so that several devices can
197 * utilize them simultaneously.
198 *-----------------------------------------------------------------------------*/
199static int fixed_mdio_register_device(int number, int speed, int duplex)
200{
201 struct mii_bus *new_bus;
202 struct fixed_info *fixed;
203 struct phy_device *phydev;
204 int err = 0;
205
206 struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL);
207
208 if (NULL == dev)
209 return -ENOMEM;
210
211 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
212
213 if (NULL == new_bus) {
214 kfree(dev);
215 return -ENOMEM;
216 }
217 fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL);
218
219 if (NULL == fixed) {
220 kfree(dev);
221 kfree(new_bus);
222 return -ENOMEM;
223 }
224
 225 fixed->regs = kzalloc(MII_REGS_NUM * sizeof(u16), GFP_KERNEL);
226 fixed->regs_num = MII_REGS_NUM;
227 fixed->phy_status.speed = speed;
228 fixed->phy_status.duplex = duplex;
229 fixed->phy_status.link = 1;
230
 231 new_bus->name = "Fixed MII Bus";
 232 new_bus->read = &fixed_mii_read;
 233 new_bus->write = &fixed_mii_write;
 234 new_bus->reset = &fixed_mii_reset;
235
236 /*set up workspace*/
237 fixed_mdio_update_regs(fixed);
238 new_bus->priv = fixed;
239
240 new_bus->dev = dev;
241 dev_set_drvdata(dev, new_bus);
242
243 /* create phy_device and register it on the mdio bus */
244 phydev = phy_device_create(new_bus, 0, 0);
245
246 /*
 247 Put the phydev pointer into the fixed struct so that the bus read/write
 248 code can access, for instance, the attached netdev. It only needs to do
 249 so when a user-specified link-update callback is in use...
250 */
251 fixed->phydev = phydev;
252
 253 if (IS_ERR(phydev)) {
 254 err = PTR_ERR(phydev);
255 goto device_create_fail;
256 }
257
258 phydev->irq = -1;
259 phydev->dev.bus = &mdio_bus_type;
260
261 if(number)
262 snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
263 "fixed_%d@%d:%d", number, speed, duplex);
264 else
265 snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
266 "fixed@%d:%d", speed, duplex);
267 phydev->bus = new_bus;
268
269 err = device_register(&phydev->dev);
270 if(err) {
271 printk(KERN_ERR "Phy %s failed to register\n",
272 phydev->dev.bus_id);
273 goto bus_register_fail;
274 }
275
276 /*
 277 the mdio bus matches drivers by phy_id... In order not to fake
 278 that artificially, we are binding the driver here by hand;
 279 it will be the same for all the fixed phys anyway.
280 */
281 down_write(&phydev->dev.bus->subsys.rwsem);
282
283 phydev->dev.driver = &fixed_mdio_driver.driver;
284
285 err = phydev->dev.driver->probe(&phydev->dev);
286 if(err < 0) {
287 printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id);
288 up_write(&phydev->dev.bus->subsys.rwsem);
289 goto probe_fail;
290 }
291
292 device_bind_driver(&phydev->dev);
293 up_write(&phydev->dev.bus->subsys.rwsem);
294
295 return 0;
296
297probe_fail:
298 device_unregister(&phydev->dev);
299bus_register_fail:
300 kfree(phydev);
301device_create_fail:
302 kfree(dev);
303 kfree(new_bus);
304 kfree(fixed);
305
306 return err;
307}
308
309
310MODULE_DESCRIPTION("Fixed PHY device & driver for PAL");
311MODULE_AUTHOR("Vitaly Bordug");
312MODULE_LICENSE("GPL");
313
314static int __init fixed_init(void)
315{
316 int ret;
317 int duplex = 0;
318
319 /* register on the bus... Not expected to be matched with anything there... */
320 phy_driver_register(&fixed_mdio_driver);
321
322 /* So let the fun begin...
 323 We will create several mdio devices here, and will bind the upper
 324 driver to them.
 325
 326 Then external software can look up the phy bus by searching for
 327 fixed@speed:duplex, e.g. fixed@100:1, to be connected to the
 328 virtual 100M Fdx phy.
 329
 330 In case several virtual PHYs are required, the bus_id will be of the
 331 form fixed_<num>@<speed>:<duplex>, which even makes it possible to
 332 define a driver-specific link control callback, if for instance the
 333 PHY is completely SW-driven.
334
335 */
336
337#ifdef CONFIG_FIXED_MII_DUPLEX
338 duplex = 1;
339#endif
340
341#ifdef CONFIG_FIXED_MII_100_FDX
342 fixed_mdio_register_device(0, 100, 1);
343#endif
344
 345#ifdef CONFIG_FIXED_MII_10_FDX
346 fixed_mdio_register_device(0, 10, 1);
347#endif
348 return 0;
349}
350
351static void __exit fixed_exit(void)
352{
353 phy_driver_unregister(&fixed_mdio_driver);
354 /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */
355}
356
357module_init(fixed_init);
358module_exit(fixed_exit);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 1dde390c164d..cf6660c93ffa 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = {
159 .suspend = mdio_bus_suspend, 159 .suspend = mdio_bus_suspend,
160 .resume = mdio_bus_resume, 160 .resume = mdio_bus_resume,
161}; 161};
162EXPORT_SYMBOL(mdio_bus_type);
162 163
163int __init mdio_bus_init(void) 164int __init mdio_bus_init(void)
164{ 165{
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7d5c2233c252..f5aad77288f9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -419,9 +419,8 @@ void phy_start_machine(struct phy_device *phydev,
419 419
420/* phy_stop_machine 420/* phy_stop_machine
421 * 421 *
422 * description: Stops the state machine timer, sets the state to 422 * description: Stops the state machine timer, sets the state to UP
423 * UP (unless it wasn't up yet), and then frees the interrupt, 423 * (unless it wasn't up yet). This function must be called BEFORE
424 * if it is in use. This function must be called BEFORE
425 * phy_detach. 424 * phy_detach.
426 */ 425 */
427void phy_stop_machine(struct phy_device *phydev) 426void phy_stop_machine(struct phy_device *phydev)
@@ -433,9 +432,6 @@ void phy_stop_machine(struct phy_device *phydev)
433 phydev->state = PHY_UP; 432 phydev->state = PHY_UP;
434 spin_unlock(&phydev->lock); 433 spin_unlock(&phydev->lock);
435 434
436 if (phydev->irq != PHY_POLL)
437 phy_stop_interrupts(phydev);
438
439 phydev->adjust_state = NULL; 435 phydev->adjust_state = NULL;
440} 436}
441 437
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1bc1e032c5d6..2d1ecfdc80db 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -45,6 +45,35 @@ static struct phy_driver genphy_driver;
45extern int mdio_bus_init(void); 45extern int mdio_bus_init(void);
46extern void mdio_bus_exit(void); 46extern void mdio_bus_exit(void);
47 47
48struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
49{
50 struct phy_device *dev;
51 /* We allocate the device, and initialize the
52 * default values */
53 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
54
55 if (NULL == dev)
 56 return ERR_PTR(-ENOMEM);
57
58 dev->speed = 0;
59 dev->duplex = -1;
60 dev->pause = dev->asym_pause = 0;
61 dev->link = 1;
62
63 dev->autoneg = AUTONEG_ENABLE;
64
65 dev->addr = addr;
66 dev->phy_id = phy_id;
67 dev->bus = bus;
68
69 dev->state = PHY_DOWN;
70
71 spin_lock_init(&dev->lock);
72
73 return dev;
74}
75EXPORT_SYMBOL(phy_device_create);
76
48/* get_phy_device 77/* get_phy_device
49 * 78 *
50 * description: Reads the ID registers of the PHY at addr on the 79 * description: Reads the ID registers of the PHY at addr on the
@@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
78 if (0xffffffff == phy_id) 107 if (0xffffffff == phy_id)
79 return NULL; 108 return NULL;
80 109
81 /* Otherwise, we allocate the device, and initialize the 110 dev = phy_device_create(bus, addr, phy_id);
82 * default values */
83 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
84
85 if (NULL == dev)
86 return ERR_PTR(-ENOMEM);
87
88 dev->speed = 0;
89 dev->duplex = -1;
90 dev->pause = dev->asym_pause = 0;
91 dev->link = 1;
92
93 dev->autoneg = AUTONEG_ENABLE;
94
95 dev->addr = addr;
96 dev->phy_id = phy_id;
97 dev->bus = bus;
98
99 dev->state = PHY_DOWN;
100
101 spin_lock_init(&dev->lock);
102 111
103 return dev; 112 return dev;
104} 113}
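/*
 * Illustrative sketch, not part of this patch: factoring
 * phy_device_create() out of get_phy_device() lets callers that cannot
 * probe IDs over MDIO (the fixed-PHY driver above being the obvious
 * customer) fabricate a phy_device directly. Hypothetical use; the ID
 * value is a stand-in, not anything from this series:
 */
static struct phy_device *make_virtual_phy(struct mii_bus *bus, int addr)
{
	/* an ID no hardware driver will match; a software driver can */
	return phy_device_create(bus, addr, 0xdeadbeef);
}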
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 25e31fb5cb31..b1d8ed40ad98 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -14,7 +14,6 @@
14 * 14 *
15 */ 15 */
16 16
17#include <linux/config.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/mii.h> 19#include <linux/mii.h>
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index ffd215d9a9be..792716beb052 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -12,7 +12,6 @@
12 * 12 *
13 */ 13 */
14 14
15#include <linux/config.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/mii.h> 17#include <linux/mii.h>
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 0ec6e9d57b94..c872f7c6cce3 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -192,7 +192,7 @@ struct cardmap {
192 void *ptr[CARDMAP_WIDTH]; 192 void *ptr[CARDMAP_WIDTH];
193}; 193};
194static void *cardmap_get(struct cardmap *map, unsigned int nr); 194static void *cardmap_get(struct cardmap *map, unsigned int nr);
195static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr); 195static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
196static unsigned int cardmap_find_first_free(struct cardmap *map); 196static unsigned int cardmap_find_first_free(struct cardmap *map);
197static void cardmap_destroy(struct cardmap **map); 197static void cardmap_destroy(struct cardmap **map);
198 198
@@ -1995,10 +1995,9 @@ ppp_register_channel(struct ppp_channel *chan)
1995{ 1995{
1996 struct channel *pch; 1996 struct channel *pch;
1997 1997
1998 pch = kmalloc(sizeof(struct channel), GFP_KERNEL); 1998 pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
1999 if (pch == 0) 1999 if (pch == 0)
2000 return -ENOMEM; 2000 return -ENOMEM;
2001 memset(pch, 0, sizeof(struct channel));
2002 pch->ppp = NULL; 2001 pch->ppp = NULL;
2003 pch->chan = chan; 2002 pch->chan = chan;
2004 chan->ppp = pch; 2003 chan->ppp = pch;
@@ -2408,13 +2407,12 @@ ppp_create_interface(int unit, int *retp)
2408 int ret = -ENOMEM; 2407 int ret = -ENOMEM;
2409 int i; 2408 int i;
2410 2409
2411 ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL); 2410 ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
2412 if (!ppp) 2411 if (!ppp)
2413 goto out; 2412 goto out;
2414 dev = alloc_netdev(0, "", ppp_setup); 2413 dev = alloc_netdev(0, "", ppp_setup);
2415 if (!dev) 2414 if (!dev)
2416 goto out1; 2415 goto out1;
2417 memset(ppp, 0, sizeof(struct ppp));
2418 2416
2419 ppp->mru = PPP_MRU; 2417 ppp->mru = PPP_MRU;
2420 init_ppp_file(&ppp->file, INTERFACE); 2418 init_ppp_file(&ppp->file, INTERFACE);
@@ -2454,11 +2452,16 @@ ppp_create_interface(int unit, int *retp)
2454 } 2452 }
2455 2453
2456 atomic_inc(&ppp_unit_count); 2454 atomic_inc(&ppp_unit_count);
2457 cardmap_set(&all_ppp_units, unit, ppp); 2455 ret = cardmap_set(&all_ppp_units, unit, ppp);
2456 if (ret != 0)
2457 goto out3;
2458
2458 mutex_unlock(&all_ppp_mutex); 2459 mutex_unlock(&all_ppp_mutex);
2459 *retp = 0; 2460 *retp = 0;
2460 return ppp; 2461 return ppp;
2461 2462
2463out3:
2464 atomic_dec(&ppp_unit_count);
2462out2: 2465out2:
2463 mutex_unlock(&all_ppp_mutex); 2466 mutex_unlock(&all_ppp_mutex);
2464 free_netdev(dev); 2467 free_netdev(dev);
@@ -2695,7 +2698,7 @@ static void *cardmap_get(struct cardmap *map, unsigned int nr)
2695 return NULL; 2698 return NULL;
2696} 2699}
2697 2700
2698static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr) 2701static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2699{ 2702{
2700 struct cardmap *p; 2703 struct cardmap *p;
2701 int i; 2704 int i;
@@ -2704,8 +2707,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2704 if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) { 2707 if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
2705 do { 2708 do {
2706 /* need a new top level */ 2709 /* need a new top level */
2707 struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); 2710 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
2708 memset(np, 0, sizeof(*np)); 2711 if (!np)
2712 goto enomem;
2709 np->ptr[0] = p; 2713 np->ptr[0] = p;
2710 if (p != NULL) { 2714 if (p != NULL) {
2711 np->shift = p->shift + CARDMAP_ORDER; 2715 np->shift = p->shift + CARDMAP_ORDER;
@@ -2719,8 +2723,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2719 while (p->shift > 0) { 2723 while (p->shift > 0) {
2720 i = (nr >> p->shift) & CARDMAP_MASK; 2724 i = (nr >> p->shift) & CARDMAP_MASK;
2721 if (p->ptr[i] == NULL) { 2725 if (p->ptr[i] == NULL) {
2722 struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL); 2726 struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
2723 memset(np, 0, sizeof(*np)); 2727 if (!np)
2728 goto enomem;
2724 np->shift = p->shift - CARDMAP_ORDER; 2729 np->shift = p->shift - CARDMAP_ORDER;
2725 np->parent = p; 2730 np->parent = p;
2726 p->ptr[i] = np; 2731 p->ptr[i] = np;
@@ -2735,6 +2740,9 @@ static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
2735 set_bit(i, &p->inuse); 2740 set_bit(i, &p->inuse);
2736 else 2741 else
2737 clear_bit(i, &p->inuse); 2742 clear_bit(i, &p->inuse);
2743 return 0;
2744 enomem:
2745 return -ENOMEM;
2738} 2746}
2739 2747
2740static unsigned int cardmap_find_first_free(struct cardmap *map) 2748static unsigned int cardmap_find_first_free(struct cardmap *map)
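/*
 * Illustrative sketch of the error-propagation pattern introduced
 * above: cardmap_set() now reports allocation failure as -ENOMEM
 * instead of dereferencing a NULL node, and ppp_create_interface()
 * unwinds via the new out3 label. The generic shape of the pattern
 * (names hypothetical, not from this patch):
 */
static int set_slot(void ***table, unsigned int nr, void *ptr)
{
	if (*table == NULL) {
		*table = kzalloc(256 * sizeof(void *), GFP_KERNEL);
		if (*table == NULL)
			return -ENOMEM;	/* caller unwinds, as out3 does */
	}
	(*table)[nr & 255] = ptr;
	return 0;
}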
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
new file mode 100644
index 000000000000..c729aeeb4696
--- /dev/null
+++ b/drivers/net/qla3xxx.c
@@ -0,0 +1,3537 @@
1/*
2 * QLogic QLA3xxx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */
7
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/dmapool.h>
18#include <linux/mempool.h>
19#include <linux/spinlock.h>
20#include <linux/kthread.h>
21#include <linux/interrupt.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/ip.h>
25#include <linux/if_arp.h>
26#include <linux/if_ether.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/skbuff.h>
31#include <linux/rtnetlink.h>
32#include <linux/if_vlan.h>
33
34#include <linux/delay.h>
35#include <linux/mm.h>
36
37#include "qla3xxx.h"
38
39#define DRV_NAME "qla3xxx"
40#define DRV_STRING "QLogic ISP3XXX Network Driver"
41#define DRV_VERSION "v2.02.00-k36"
42#define PFX DRV_NAME " "
43
44static const char ql3xxx_driver_name[] = DRV_NAME;
45static const char ql3xxx_driver_version[] = DRV_VERSION;
46
47MODULE_AUTHOR("QLogic Corporation");
48MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION);
49MODULE_LICENSE("GPL");
50MODULE_VERSION(DRV_VERSION);
51
52static const u32 default_msg
53 = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
54 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
55
56static int debug = -1; /* defaults above */
57module_param(debug, int, 0);
58MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
59
60static int msi;
61module_param(msi, int, 0);
62MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
63
64static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
65 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
66 /* required last entry */
67 {0,}
68};
69
70MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
71
72/*
73 * Caller must take hw_lock.
74 */
75static int ql_sem_spinlock(struct ql3_adapter *qdev,
76 u32 sem_mask, u32 sem_bits)
77{
78 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
79 u32 value;
80 unsigned int seconds = 3;
81
82 do {
83 writel((sem_mask | sem_bits),
84 &port_regs->CommonRegs.semaphoreReg);
85 value = readl(&port_regs->CommonRegs.semaphoreReg);
86 if ((value & (sem_mask >> 16)) == sem_bits)
87 return 0;
88 ssleep(1);
89 } while (--seconds);
90 return -1;
91}
92
93static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
94{
95 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
96 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
97 readl(&port_regs->CommonRegs.semaphoreReg);
98}
99
100static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
101{
102 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
103 u32 value;
104
105 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
106 value = readl(&port_regs->CommonRegs.semaphoreReg);
107 return ((value & (sem_mask >> 16)) == sem_bits);
108}
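/*
 * Note for context (an inference from usage throughout this driver,
 * not a documented fact): these semaphore and config registers appear
 * to follow a write-mask convention in which the upper 16 bits select
 * which of the lower 16 bits are actually updated, so no
 * read-modify-write cycle is needed. A helper expressing that
 * convention (illustrative only):
 */
static inline u32 ql_masked_write_value(u16 mask, u16 bits)
{
	/* upper half: bits to touch; lower half: their new values */
	return ((u32) mask << 16) | bits;
}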
109
110/*
111 * Caller holds hw_lock.
112 */
113static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
114{
115 int i = 0;
116
117 while (1) {
118 if (!ql_sem_lock(qdev,
119 QL_DRVR_SEM_MASK,
120 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
121 * 2) << 1)) {
122 if (i < 10) {
123 ssleep(1);
124 i++;
125 } else {
126 printk(KERN_ERR PFX "%s: Timed out waiting for "
127 "driver lock...\n",
128 qdev->ndev->name);
129 return 0;
130 }
131 } else {
132 printk(KERN_DEBUG PFX
133 "%s: driver lock acquired.\n",
134 qdev->ndev->name);
135 return 1;
136 }
137 }
138}
139
140static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
141{
142 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
143
144 writel(((ISP_CONTROL_NP_MASK << 16) | page),
145 &port_regs->CommonRegs.ispControlStatus);
146 readl(&port_regs->CommonRegs.ispControlStatus);
147 qdev->current_page = page;
148}
149
150static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
151 u32 __iomem * reg)
152{
153 u32 value;
154 unsigned long hw_flags;
155
156 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
157 value = readl(reg);
158 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
159
160 return value;
161}
162
163static u32 ql_read_common_reg(struct ql3_adapter *qdev,
164 u32 __iomem * reg)
165{
166 return readl(reg);
167}
168
169static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
170{
171 u32 value;
172 unsigned long hw_flags;
173
174 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
175
176 if (qdev->current_page != 0)
177 ql_set_register_page(qdev,0);
178 value = readl(reg);
179
180 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
181 return value;
182}
183
184static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
185{
186 if (qdev->current_page != 0)
187 ql_set_register_page(qdev,0);
188 return readl(reg);
189}
190
191static void ql_write_common_reg_l(struct ql3_adapter *qdev,
192 u32 * reg, u32 value)
193{
194 unsigned long hw_flags;
195
196 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
197 writel(value, (u32 *) reg);
198 readl(reg);
199 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
200 return;
201}
202
203static void ql_write_common_reg(struct ql3_adapter *qdev,
204 u32 * reg, u32 value)
205{
206 writel(value, (u32 *) reg);
207 readl(reg);
208 return;
209}
210
211static void ql_write_page0_reg(struct ql3_adapter *qdev,
212 u32 * reg, u32 value)
213{
214 if (qdev->current_page != 0)
215 ql_set_register_page(qdev,0);
216 writel(value, (u32 *) reg);
217 readl(reg);
218 return;
219}
220
221/*
222 * Caller holds hw_lock. Only called during init.
223 */
224static void ql_write_page1_reg(struct ql3_adapter *qdev,
225 u32 * reg, u32 value)
226{
227 if (qdev->current_page != 1)
228 ql_set_register_page(qdev,1);
229 writel(value, (u32 *) reg);
230 readl(reg);
231 return;
232}
233
234/*
235 * Caller holds hw_lock. Only called during init.
236 */
237static void ql_write_page2_reg(struct ql3_adapter *qdev,
238 u32 * reg, u32 value)
239{
240 if (qdev->current_page != 2)
241 ql_set_register_page(qdev,2);
242 writel(value, (u32 *) reg);
243 readl(reg);
244 return;
245}
246
247static void ql_disable_interrupts(struct ql3_adapter *qdev)
248{
249 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
250
251 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
252 (ISP_IMR_ENABLE_INT << 16));
253
254}
255
256static void ql_enable_interrupts(struct ql3_adapter *qdev)
257{
258 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
259
260 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
261 ((0xff << 16) | ISP_IMR_ENABLE_INT));
262
263}
264
265static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
266 struct ql_rcv_buf_cb *lrg_buf_cb)
267{
268 u64 map;
269 lrg_buf_cb->next = NULL;
270
271 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
272 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
273 } else {
274 qdev->lrg_buf_free_tail->next = lrg_buf_cb;
275 qdev->lrg_buf_free_tail = lrg_buf_cb;
276 }
277
278 if (!lrg_buf_cb->skb) {
279 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
280 if (unlikely(!lrg_buf_cb->skb)) {
281 printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
282 qdev->ndev->name);
283 qdev->lrg_buf_skb_check++;
284 } else {
285 /*
286 * We save some space to copy the ethhdr from first
287 * buffer
288 */
289 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
290 map = pci_map_single(qdev->pdev,
291 lrg_buf_cb->skb->data,
292 qdev->lrg_buffer_len -
293 QL_HEADER_SPACE,
294 PCI_DMA_FROMDEVICE);
295 lrg_buf_cb->buf_phy_addr_low =
296 cpu_to_le32(LS_64BITS(map));
297 lrg_buf_cb->buf_phy_addr_high =
298 cpu_to_le32(MS_64BITS(map));
299 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
300 pci_unmap_len_set(lrg_buf_cb, maplen,
301 qdev->lrg_buffer_len -
302 QL_HEADER_SPACE);
303 }
304 }
305
306 qdev->lrg_buf_free_count++;
307}
308
309static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
310 *qdev)
311{
312 struct ql_rcv_buf_cb *lrg_buf_cb;
313
314 if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
315 if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
316 qdev->lrg_buf_free_tail = NULL;
317 qdev->lrg_buf_free_count--;
318 }
319
320 return lrg_buf_cb;
321}
322
323static u32 addrBits = EEPROM_NO_ADDR_BITS;
324static u32 dataBits = EEPROM_NO_DATA_BITS;
325
326static void fm93c56a_deselect(struct ql3_adapter *qdev);
327static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
328 unsigned short *value);
329
330/*
331 * Caller holds hw_lock.
332 */
333static void fm93c56a_select(struct ql3_adapter *qdev)
334{
335 struct ql3xxx_port_registers __iomem *port_regs =
336 qdev->mem_map_registers;
337
338 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
339 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
340 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
341 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
342 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
343}
344
345/*
346 * Caller holds hw_lock.
347 */
348static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
349{
350 int i;
351 u32 mask;
352 u32 dataBit;
353 u32 previousBit;
354 struct ql3xxx_port_registers __iomem *port_regs =
355 qdev->mem_map_registers;
356
357 /* Clock in a zero, then do the start bit */
358 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
359 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
360 AUBURN_EEPROM_DO_1);
361 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
362 ISP_NVRAM_MASK | qdev->
363 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
364 AUBURN_EEPROM_CLK_RISE);
365 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
366 ISP_NVRAM_MASK | qdev->
367 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
368 AUBURN_EEPROM_CLK_FALL);
369
370 mask = 1 << (FM93C56A_CMD_BITS - 1);
371 /* Force the previous data bit to be different */
372 previousBit = 0xffff;
373 for (i = 0; i < FM93C56A_CMD_BITS; i++) {
374 dataBit =
375 (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
376 if (previousBit != dataBit) {
377 /*
378 * If the bit changed, then change the DO state to
379 * match
380 */
381 ql_write_common_reg(qdev,
382 &port_regs->CommonRegs.
383 serialPortInterfaceReg,
384 ISP_NVRAM_MASK | qdev->
385 eeprom_cmd_data | dataBit);
386 previousBit = dataBit;
387 }
388 ql_write_common_reg(qdev,
389 &port_regs->CommonRegs.
390 serialPortInterfaceReg,
391 ISP_NVRAM_MASK | qdev->
392 eeprom_cmd_data | dataBit |
393 AUBURN_EEPROM_CLK_RISE);
394 ql_write_common_reg(qdev,
395 &port_regs->CommonRegs.
396 serialPortInterfaceReg,
397 ISP_NVRAM_MASK | qdev->
398 eeprom_cmd_data | dataBit |
399 AUBURN_EEPROM_CLK_FALL);
400 cmd = cmd << 1;
401 }
402
403 mask = 1 << (addrBits - 1);
404 /* Force the previous data bit to be different */
405 previousBit = 0xffff;
406 for (i = 0; i < addrBits; i++) {
407 dataBit =
408 (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
409 AUBURN_EEPROM_DO_0;
410 if (previousBit != dataBit) {
411 /*
412 * If the bit changed, then change the DO state to
413 * match
414 */
415 ql_write_common_reg(qdev,
416 &port_regs->CommonRegs.
417 serialPortInterfaceReg,
418 ISP_NVRAM_MASK | qdev->
419 eeprom_cmd_data | dataBit);
420 previousBit = dataBit;
421 }
422 ql_write_common_reg(qdev,
423 &port_regs->CommonRegs.
424 serialPortInterfaceReg,
425 ISP_NVRAM_MASK | qdev->
426 eeprom_cmd_data | dataBit |
427 AUBURN_EEPROM_CLK_RISE);
428 ql_write_common_reg(qdev,
429 &port_regs->CommonRegs.
430 serialPortInterfaceReg,
431 ISP_NVRAM_MASK | qdev->
432 eeprom_cmd_data | dataBit |
433 AUBURN_EEPROM_CLK_FALL);
434 eepromAddr = eepromAddr << 1;
435 }
436}
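/*
 * Illustrative sketch, not a drop-in replacement: both loops in
 * fm93c56a_cmd() shift a value out MSB-first, presenting each bit on
 * DO and strobing the serial clock high then low. A condensed
 * equivalent (it rewrites DO on every bit rather than only on change,
 * as the code above does):
 */
static void eeprom_shift_out(struct ql3_adapter *qdev, u32 val, int nbits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 mask;

	for (mask = 1 << (nbits - 1); mask != 0; mask >>= 1) {
		u32 bit = (val & mask) ? AUBURN_EEPROM_DO_1
				       : AUBURN_EEPROM_DO_0;

		/* present the bit on DO, then clock rise, then fall */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.serialPortInterfaceReg,
				    ISP_NVRAM_MASK | qdev->eeprom_cmd_data | bit);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.serialPortInterfaceReg,
				    ISP_NVRAM_MASK | qdev->eeprom_cmd_data | bit |
				    AUBURN_EEPROM_CLK_RISE);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.serialPortInterfaceReg,
				    ISP_NVRAM_MASK | qdev->eeprom_cmd_data | bit |
				    AUBURN_EEPROM_CLK_FALL);
	}
}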
437
438/*
439 * Caller holds hw_lock.
440 */
441static void fm93c56a_deselect(struct ql3_adapter *qdev)
442{
443 struct ql3xxx_port_registers __iomem *port_regs =
444 qdev->mem_map_registers;
445 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
446 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
447 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
448}
449
450/*
451 * Caller holds hw_lock.
452 */
453static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
454{
455 int i;
456 u32 data = 0;
457 u32 dataBit;
458 struct ql3xxx_port_registers __iomem *port_regs =
459 qdev->mem_map_registers;
460
461 /* Read the data bits */
462 /* The first bit is a dummy. Clock right over it. */
463 for (i = 0; i < dataBits; i++) {
464 ql_write_common_reg(qdev,
465 &port_regs->CommonRegs.
466 serialPortInterfaceReg,
467 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
468 AUBURN_EEPROM_CLK_RISE);
469 ql_write_common_reg(qdev,
470 &port_regs->CommonRegs.
471 serialPortInterfaceReg,
472 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
473 AUBURN_EEPROM_CLK_FALL);
474 dataBit =
475 (ql_read_common_reg
476 (qdev,
477 &port_regs->CommonRegs.
478 serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
479 data = (data << 1) | dataBit;
480 }
481 *value = (u16) data;
482}
483
484/*
485 * Caller holds hw_lock.
486 */
487static void eeprom_readword(struct ql3_adapter *qdev,
488 u32 eepromAddr, unsigned short *value)
489{
490 fm93c56a_select(qdev);
491 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
492 fm93c56a_datain(qdev, value);
493 fm93c56a_deselect(qdev);
494}
495
496static void ql_swap_mac_addr(u8 * macAddress)
497{
498#ifdef __BIG_ENDIAN
499 u8 temp;
500 temp = macAddress[0];
501 macAddress[0] = macAddress[1];
502 macAddress[1] = temp;
503 temp = macAddress[2];
504 macAddress[2] = macAddress[3];
505 macAddress[3] = temp;
506 temp = macAddress[4];
507 macAddress[4] = macAddress[5];
508 macAddress[5] = temp;
509#endif
510}
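/*
 * Illustrative sketch: ql_swap_mac_addr() is a fixed-size unrolling of
 * a pairwise byte swap, needed because (per the endianness comment in
 * ql_get_nvram_params() below) the EEPROM stores the MAC address as
 * little-endian 16-bit words. The general form:
 */
static void swap_bytes_pairwise(u8 *buf, int len)
{
	int i;

	for (i = 0; i + 1 < len; i += 2) {
		u8 tmp = buf[i];

		buf[i] = buf[i + 1];
		buf[i + 1] = tmp;
	}
}
/* On big-endian hosts, ql_swap_mac_addr(mac) is equivalent to
 * swap_bytes_pairwise(mac, 6). */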
511
512static int ql_get_nvram_params(struct ql3_adapter *qdev)
513{
514 u16 *pEEPROMData;
515 u16 checksum = 0;
516 u32 index;
517 unsigned long hw_flags;
518
519 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
520
521 pEEPROMData = (u16 *) & qdev->nvram_data;
522 qdev->eeprom_cmd_data = 0;
523 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
524 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
525 2) << 10)) {
526 printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
527 __func__);
528 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
529 return -1;
530 }
531
532 for (index = 0; index < EEPROM_SIZE; index++) {
533 eeprom_readword(qdev, index, pEEPROMData);
534 checksum += *pEEPROMData;
535 pEEPROMData++;
536 }
537 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
538
539 if (checksum != 0) {
540 printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
541 qdev->ndev->name, checksum);
542 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
543 return -1;
544 }
545
546 /*
547 * We have a problem with endianness for the MAC addresses
548 * and the two 8-bit values version, and numPorts. We
549 * have to swap them on big endian systems.
550 */
551 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
552 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
553 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
554 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
555 pEEPROMData = (u16 *) & qdev->nvram_data.version;
556 *pEEPROMData = le16_to_cpu(*pEEPROMData);
557
558 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
559 return checksum;
560}
561
562static const u32 PHYAddr[2] = {
563 PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
564};
565
566static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
567{
568 struct ql3xxx_port_registers __iomem *port_regs =
569 qdev->mem_map_registers;
570 u32 temp;
571 int count = 1000;
572
573 while (count) {
574 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
575 if (!(temp & MAC_MII_STATUS_BSY))
576 return 0;
577 udelay(10);
578 count--;
579 }
580 return -1;
581}
582
583static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
584{
585 struct ql3xxx_port_registers __iomem *port_regs =
586 qdev->mem_map_registers;
587 u32 scanControl;
588
589 if (qdev->numPorts > 1) {
590 /* Auto scan will cycle through multiple ports */
591 scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
592 } else {
593 scanControl = MAC_MII_CONTROL_SC;
594 }
595
596 /*
597 * Scan register 1 of PHY/PETBI,
598 * Set up to scan both devices
599 * The autoscan starts from the first register, completes
600 * the last one before rolling over to the first
601 */
602 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
603 PHYAddr[0] | MII_SCAN_REGISTER);
604
605 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
606 (scanControl) |
607 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
608}
609
610static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
611{
612 u8 ret;
613 struct ql3xxx_port_registers __iomem *port_regs =
614 qdev->mem_map_registers;
615
616 /* See if scan mode is enabled before we turn it off */
617 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
618 (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
619 /* Scan is enabled */
620 ret = 1;
621 } else {
622 /* Scan is disabled */
623 ret = 0;
624 }
625
626 /*
627 * When disabling scan mode you must first change the MII register
628 * address
629 */
630 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
631 PHYAddr[0] | MII_SCAN_REGISTER);
632
633 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
634 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
635 MAC_MII_CONTROL_RC) << 16));
636
637 return ret;
638}
639
640static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
641 u16 regAddr, u16 value, u32 mac_index)
642{
643 struct ql3xxx_port_registers __iomem *port_regs =
644 qdev->mem_map_registers;
645 u8 scanWasEnabled;
646
647 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
648
649 if (ql_wait_for_mii_ready(qdev)) {
650 if (netif_msg_link(qdev))
651 printk(KERN_WARNING PFX
652 "%s Timed out waiting for management port to "
653 "get free before issuing command.\n",
654 qdev->ndev->name);
655 return -1;
656 }
657
658 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
659 PHYAddr[mac_index] | regAddr);
660
661 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
662
663 /* Wait for the write to complete. */
664 if (ql_wait_for_mii_ready(qdev)) {
665 if (netif_msg_link(qdev))
666 printk(KERN_WARNING PFX
667 "%s: Timed out waiting for management port to"
668 "get free before issuing command.\n",
669 qdev->ndev->name);
670 return -1;
671 }
672
673 if (scanWasEnabled)
674 ql_mii_enable_scan_mode(qdev);
675
676 return 0;
677}
678
679static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
680 u16 * value, u32 mac_index)
681{
682 struct ql3xxx_port_registers __iomem *port_regs =
683 qdev->mem_map_registers;
684 u8 scanWasEnabled;
685 u32 temp;
686
687 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
688
689 if (ql_wait_for_mii_ready(qdev)) {
690 if (netif_msg_link(qdev))
691 printk(KERN_WARNING PFX
692 "%s: Timed out waiting for management port to "
693 "get free before issuing command.\n",
694 qdev->ndev->name);
695 return -1;
696 }
697
698 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
699 PHYAddr[mac_index] | regAddr);
700
701 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
702 (MAC_MII_CONTROL_RC << 16));
703
704 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
705 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
706
707 /* Wait for the read to complete */
708 if (ql_wait_for_mii_ready(qdev)) {
709 if (netif_msg_link(qdev))
710 printk(KERN_WARNING PFX
711 "%s: Timed out waiting for management port to "
712 "get free after issuing command.\n",
713 qdev->ndev->name);
714 return -1;
715 }
716
717 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
718 *value = (u16) temp;
719
720 if (scanWasEnabled)
721 ql_mii_enable_scan_mode(qdev);
722
723 return 0;
724}
725
726static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
727{
728 struct ql3xxx_port_registers __iomem *port_regs =
729 qdev->mem_map_registers;
730
731 ql_mii_disable_scan_mode(qdev);
732
733 if (ql_wait_for_mii_ready(qdev)) {
734 if (netif_msg_link(qdev))
735 printk(KERN_WARNING PFX
736 "%s: Timed out waiting for management port to "
737 "get free before issuing command.\n",
738 qdev->ndev->name);
739 return -1;
740 }
741
742 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
743 qdev->PHYAddr | regAddr);
744
745 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
746
747 /* Wait for write to complete. */
748 if (ql_wait_for_mii_ready(qdev)) {
749 if (netif_msg_link(qdev))
750 printk(KERN_WARNING PFX
751 "%s: Timed out waiting for management port to "
752 "get free before issuing command.\n",
753 qdev->ndev->name);
754 return -1;
755 }
756
757 ql_mii_enable_scan_mode(qdev);
758
759 return 0;
760}
761
762static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
763{
764 u32 temp;
765 struct ql3xxx_port_registers __iomem *port_regs =
766 qdev->mem_map_registers;
767
768 ql_mii_disable_scan_mode(qdev);
769
770 if (ql_wait_for_mii_ready(qdev)) {
771 if (netif_msg_link(qdev))
772 printk(KERN_WARNING PFX
773 "%s: Timed out waiting for management port to "
774 "get free before issuing command.\n",
775 qdev->ndev->name);
776 return -1;
777 }
778
779 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
780 qdev->PHYAddr | regAddr);
781
782 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
783 (MAC_MII_CONTROL_RC << 16));
784
785 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
786 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
787
788 /* Wait for the read to complete */
789 if (ql_wait_for_mii_ready(qdev)) {
790 if (netif_msg_link(qdev))
791 printk(KERN_WARNING PFX
792 "%s: Timed out waiting for management port to "
793 "get free before issuing command.\n",
794 qdev->ndev->name);
795 return -1;
796 }
797
798 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
799 *value = (u16) temp;
800
801 ql_mii_enable_scan_mode(qdev);
802
803 return 0;
804}
805
806static void ql_petbi_reset(struct ql3_adapter *qdev)
807{
808 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
809}
810
811static void ql_petbi_start_neg(struct ql3_adapter *qdev)
812{
813 u16 reg;
814
815 /* Enable Auto-negotiation sense */
816 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
817 reg |= PETBI_TBI_AUTO_SENSE;
818 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
819
820 ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
821 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
822
823 ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
824 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
825 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
826
827}
828
829static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
830{
831 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
832 mac_index);
833}
834
835static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
836{
837 u16 reg;
838
839 /* Enable Auto-negotiation sense */
840 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
841 reg |= PETBI_TBI_AUTO_SENSE;
842 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);
843
844 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
845 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);
846
847 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
848 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
849 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
850 mac_index);
851}
852
853static void ql_petbi_init(struct ql3_adapter *qdev)
854{
855 ql_petbi_reset(qdev);
856 ql_petbi_start_neg(qdev);
857}
858
859static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
860{
861 ql_petbi_reset_ex(qdev, mac_index);
862 ql_petbi_start_neg_ex(qdev, mac_index);
863}
864
865static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
866{
867 u16 reg;
868
869 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
870 return 0;
871
872 return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
873}
874
875static int ql_phy_get_speed(struct ql3_adapter *qdev)
876{
877 u16 reg;
878
879 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
880 return 0;
881
882 reg = (((reg & 0x18) >> 3) & 3);
883
884 if (reg == 2)
885 return SPEED_1000;
886 else if (reg == 1)
887 return SPEED_100;
888 else if (reg == 0)
889 return SPEED_10;
890 else
891 return -1;
892}
893
894static int ql_is_full_dup(struct ql3_adapter *qdev)
895{
896 u16 reg;
897
898 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
899 return 0;
900
901 return (reg & PHY_AUX_DUPLEX_STAT) != 0;
902}
903
904static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
905{
906 u16 reg;
907
908 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
909 return 0;
910
911 return (reg & PHY_NEG_PAUSE) != 0;
912}
913
914/*
915 * Caller holds hw_lock.
916 */
917static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
918{
919 struct ql3xxx_port_registers __iomem *port_regs =
920 qdev->mem_map_registers;
921 u32 value;
922
923 if (enable)
924 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
925 else
926 value = (MAC_CONFIG_REG_PE << 16);
927
928 if (qdev->mac_index)
929 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
930 else
931 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
932}
933
934/*
935 * Caller holds hw_lock.
936 */
937static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
938{
939 struct ql3xxx_port_registers __iomem *port_regs =
940 qdev->mem_map_registers;
941 u32 value;
942
943 if (enable)
944 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
945 else
946 value = (MAC_CONFIG_REG_SR << 16);
947
948 if (qdev->mac_index)
949 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
950 else
951 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
952}
953
954/*
955 * Caller holds hw_lock.
956 */
957static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
958{
959 struct ql3xxx_port_registers __iomem *port_regs =
960 qdev->mem_map_registers;
961 u32 value;
962
963 if (enable)
964 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
965 else
966 value = (MAC_CONFIG_REG_GM << 16);
967
968 if (qdev->mac_index)
969 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
970 else
971 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
972}
973
974/*
975 * Caller holds hw_lock.
976 */
977static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
978{
979 struct ql3xxx_port_registers __iomem *port_regs =
980 qdev->mem_map_registers;
981 u32 value;
982
983 if (enable)
984 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
985 else
986 value = (MAC_CONFIG_REG_FD << 16);
987
988 if (qdev->mac_index)
989 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
990 else
991 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
992}
993
994/*
995 * Caller holds hw_lock.
996 */
997static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
998{
999 struct ql3xxx_port_registers __iomem *port_regs =
1000 qdev->mem_map_registers;
1001 u32 value;
1002
1003 if (enable)
1004 value =
1005 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1006 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1007 else
1008 value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1009
1010 if (qdev->mac_index)
1011 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1012 else
1013 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1014}
1015
1016/*
1017 * Caller holds hw_lock.
1018 */
1019static int ql_is_fiber(struct ql3_adapter *qdev)
1020{
1021 struct ql3xxx_port_registers __iomem *port_regs =
1022 qdev->mem_map_registers;
1023 u32 bitToCheck = 0;
1024 u32 temp;
1025
1026 switch (qdev->mac_index) {
1027 case 0:
1028 bitToCheck = PORT_STATUS_SM0;
1029 break;
1030 case 1:
1031 bitToCheck = PORT_STATUS_SM1;
1032 break;
1033 }
1034
1035 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1036 return (temp & bitToCheck) != 0;
1037}
1038
1039static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1040{
1041 u16 reg;
1042 ql_mii_read_reg(qdev, 0x00, &reg);
1043 return (reg & 0x1000) != 0;
1044}
1045
1046/*
1047 * Caller holds hw_lock.
1048 */
1049static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1050{
1051 struct ql3xxx_port_registers __iomem *port_regs =
1052 qdev->mem_map_registers;
1053 u32 bitToCheck = 0;
1054 u32 temp;
1055
1056 switch (qdev->mac_index) {
1057 case 0:
1058 bitToCheck = PORT_STATUS_AC0;
1059 break;
1060 case 1:
1061 bitToCheck = PORT_STATUS_AC1;
1062 break;
1063 }
1064
1065 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1066 if (temp & bitToCheck) {
1067 if (netif_msg_link(qdev))
1068 printk(KERN_INFO PFX
1069 "%s: Auto-Negotiate complete.\n",
1070 qdev->ndev->name);
1071 return 1;
1072 } else {
1073 if (netif_msg_link(qdev))
1074 printk(KERN_WARNING PFX
1075 "%s: Auto-Negotiate incomplete.\n",
1076 qdev->ndev->name);
1077 return 0;
1078 }
1079}
1080
1081/*
1082 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
1083 */
1084static int ql_is_neg_pause(struct ql3_adapter *qdev)
1085{
1086 if (ql_is_fiber(qdev))
1087 return ql_is_petbi_neg_pause(qdev);
1088 else
1089 return ql_is_phy_neg_pause(qdev);
1090}
1091
1092static int ql_auto_neg_error(struct ql3_adapter *qdev)
1093{
1094 struct ql3xxx_port_registers __iomem *port_regs =
1095 qdev->mem_map_registers;
1096 u32 bitToCheck = 0;
1097 u32 temp;
1098
1099 switch (qdev->mac_index) {
1100 case 0:
1101 bitToCheck = PORT_STATUS_AE0;
1102 break;
1103 case 1:
1104 bitToCheck = PORT_STATUS_AE1;
1105 break;
1106 }
1107 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1108 return (temp & bitToCheck) != 0;
1109}
1110
1111static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1112{
1113 if (ql_is_fiber(qdev))
1114 return SPEED_1000;
1115 else
1116 return ql_phy_get_speed(qdev);
1117}
1118
1119static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1120{
1121 if (ql_is_fiber(qdev))
1122 return 1;
1123 else
1124 return ql_is_full_dup(qdev);
1125}
1126
1127/*
1128 * Caller holds hw_lock.
1129 */
1130static int ql_link_down_detect(struct ql3_adapter *qdev)
1131{
1132 struct ql3xxx_port_registers __iomem *port_regs =
1133 qdev->mem_map_registers;
1134 u32 bitToCheck = 0;
1135 u32 temp;
1136
1137 switch (qdev->mac_index) {
1138 case 0:
1139 bitToCheck = ISP_CONTROL_LINK_DN_0;
1140 break;
1141 case 1:
1142 bitToCheck = ISP_CONTROL_LINK_DN_1;
1143 break;
1144 }
1145
1146 temp =
1147 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1148 return (temp & bitToCheck) != 0;
1149}
1150
1151/*
1152 * Caller holds hw_lock.
1153 */
1154static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1155{
1156 struct ql3xxx_port_registers __iomem *port_regs =
1157 qdev->mem_map_registers;
1158
1159 switch (qdev->mac_index) {
1160 case 0:
1161 ql_write_common_reg(qdev,
1162 &port_regs->CommonRegs.ispControlStatus,
1163 (ISP_CONTROL_LINK_DN_0) |
1164 (ISP_CONTROL_LINK_DN_0 << 16));
1165 break;
1166
1167 case 1:
1168 ql_write_common_reg(qdev,
1169 &port_regs->CommonRegs.ispControlStatus,
1170 (ISP_CONTROL_LINK_DN_1) |
1171 (ISP_CONTROL_LINK_DN_1 << 16));
1172 break;
1173
1174 default:
1175 return 1;
1176 }
1177
1178 return 0;
1179}
1180
1181/*
1182 * Caller holds hw_lock.
1183 */
1184static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
1185 u32 mac_index)
1186{
1187 struct ql3xxx_port_registers __iomem *port_regs =
1188 qdev->mem_map_registers;
1189 u32 bitToCheck = 0;
1190 u32 temp;
1191
1192 switch (mac_index) {
1193 case 0:
1194 bitToCheck = PORT_STATUS_F1_ENABLED;
1195 break;
1196 case 1:
1197 bitToCheck = PORT_STATUS_F3_ENABLED;
1198 break;
1199 default:
1200 break;
1201 }
1202
1203 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1204 if (temp & bitToCheck) {
1205 if (netif_msg_link(qdev))
1206 printk(KERN_DEBUG PFX
1207 "%s: is not link master.\n", qdev->ndev->name);
1208 return 0;
1209 } else {
1210 if (netif_msg_link(qdev))
1211 printk(KERN_DEBUG PFX
1212 "%s: is link master.\n", qdev->ndev->name);
1213 return 1;
1214 }
1215}
1216
1217static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
1218{
1219 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
1220}
1221
1222static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
1223{
1224 u16 reg;
1225
1226 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
1227 PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);
1228
1229 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
1230 ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
1231 mac_index);
1232}
1233
1234static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
1235{
1236 ql_phy_reset_ex(qdev, mac_index);
1237 ql_phy_start_neg_ex(qdev, mac_index);
1238}
1239
1240/*
1241 * Caller holds hw_lock.
1242 */
1243static u32 ql_get_link_state(struct ql3_adapter *qdev)
1244{
1245 struct ql3xxx_port_registers __iomem *port_regs =
1246 qdev->mem_map_registers;
1247 u32 bitToCheck = 0;
1248 u32 temp, linkState;
1249
1250 switch (qdev->mac_index) {
1251 case 0:
1252 bitToCheck = PORT_STATUS_UP0;
1253 break;
1254 case 1:
1255 bitToCheck = PORT_STATUS_UP1;
1256 break;
1257 }
1258 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1259 if (temp & bitToCheck) {
1260 linkState = LS_UP;
1261 } else {
1262 linkState = LS_DOWN;
1263 if (netif_msg_link(qdev))
1264 printk(KERN_WARNING PFX
1265 "%s: Link is down.\n", qdev->ndev->name);
1266 }
1267 return linkState;
1268}
1269
1270static int ql_port_start(struct ql3_adapter *qdev)
1271{
1272 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1273 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1274 2) << 7))
1275 return -1;
1276
1277 if (ql_is_fiber(qdev)) {
1278 ql_petbi_init(qdev);
1279 } else {
1280 /* Copper port */
1281 ql_phy_init_ex(qdev, qdev->mac_index);
1282 }
1283
1284 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1285 return 0;
1286}
1287
1288static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1289{
1290
1291 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1292 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1293 2) << 7))
1294 return -1;
1295
1296 if (!ql_auto_neg_error(qdev)) {
1297 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1298 /* configure the MAC */
1299 if (netif_msg_link(qdev))
1300 printk(KERN_DEBUG PFX
1301 "%s: Configuring link.\n",
1302 qdev->ndev->
1303 name);
1304 ql_mac_cfg_soft_reset(qdev, 1);
1305 ql_mac_cfg_gig(qdev,
1306 (ql_get_link_speed
1307 (qdev) ==
1308 SPEED_1000));
1309 ql_mac_cfg_full_dup(qdev,
1310 ql_is_link_full_dup
1311 (qdev));
1312 ql_mac_cfg_pause(qdev,
1313 ql_is_neg_pause
1314 (qdev));
1315 ql_mac_cfg_soft_reset(qdev, 0);
1316
1317 /* enable the MAC */
1318 if (netif_msg_link(qdev))
1319 printk(KERN_DEBUG PFX
1320 "%s: Enabling mac.\n",
1321 qdev->ndev->
1322 name);
1323 ql_mac_enable(qdev, 1);
1324 }
1325
1326 if (netif_msg_link(qdev))
1327 printk(KERN_DEBUG PFX
1328 "%s: Change port_link_state LS_DOWN to LS_UP.\n",
1329 qdev->ndev->name);
1330 qdev->port_link_state = LS_UP;
1331 netif_start_queue(qdev->ndev);
1332 netif_carrier_on(qdev->ndev);
1333 if (netif_msg_link(qdev))
1334 printk(KERN_INFO PFX
1335 "%s: Link is up at %d Mbps, %s duplex.\n",
1336 qdev->ndev->name,
1337 ql_get_link_speed(qdev),
1338 ql_is_link_full_dup(qdev)
1339 ? "full" : "half");
1340
1341 } else { /* Remote error detected */
1342
1343 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1344 if (netif_msg_link(qdev))
1345 printk(KERN_DEBUG PFX
1346 "%s: Remote error detected. "
1347 "Calling ql_port_start().\n",
1348 qdev->ndev->
1349 name);
1350 /*
1351 * ql_port_start() is shared code and needs
1352 * to lock the PHY on its own.
1353 */
1354 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1355 if (ql_port_start(qdev)) { /* Restart port */
1356 return -1;
1357 } else
1358 return 0;
1359 }
1360 }
1361 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1362 return 0;
1363}
1364
1365static void ql_link_state_machine(struct ql3_adapter *qdev)
1366{
1367 u32 curr_link_state;
1368 unsigned long hw_flags;
1369
1370 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1371
1372 curr_link_state = ql_get_link_state(qdev);
1373
1374 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
1375 if (netif_msg_link(qdev))
1376 printk(KERN_INFO PFX
1377 "%s: Reset in progress, skip processing link "
1378 "state.\n", qdev->ndev->name);
1379 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return;
1380 }
1381
1382 switch (qdev->port_link_state) {
1383 default:
1384 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1385 ql_port_start(qdev);
1386 }
1387 qdev->port_link_state = LS_DOWN;
1388 /* Fall Through */
1389
1390 case LS_DOWN:
1391 if (netif_msg_link(qdev))
1392 printk(KERN_DEBUG PFX
1393 "%s: port_link_state = LS_DOWN.\n",
1394 qdev->ndev->name);
1395 if (curr_link_state == LS_UP) {
1396 if (netif_msg_link(qdev))
1397 printk(KERN_DEBUG PFX
1398 "%s: curr_link_state = LS_UP.\n",
1399 qdev->ndev->name);
1400 if (ql_is_auto_neg_complete(qdev))
1401 ql_finish_auto_neg(qdev);
1402
1403 if (qdev->port_link_state == LS_UP)
1404 ql_link_down_detect_clear(qdev);
1405
1406 }
1407 break;
1408
1409 case LS_UP:
1410 /*
1411 * See if the link is currently down or went down and came
1412 * back up
1413 */
1414 if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
1415 if (netif_msg_link(qdev))
1416 printk(KERN_INFO PFX "%s: Link is down.\n",
1417 qdev->ndev->name);
1418 qdev->port_link_state = LS_DOWN;
1419 }
1420 break;
1421 }
1422 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1423}
1424
1425/*
1426 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1427 */
1428static void ql_get_phy_owner(struct ql3_adapter *qdev)
1429{
1430 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1431 set_bit(QL_LINK_MASTER,&qdev->flags);
1432 else
1433 clear_bit(QL_LINK_MASTER,&qdev->flags);
1434}
1435
1436/*
1437 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1438 */
1439static void ql_init_scan_mode(struct ql3_adapter *qdev)
1440{
1441 ql_mii_enable_scan_mode(qdev);
1442
1443 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1444 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1445 ql_petbi_init_ex(qdev, qdev->mac_index);
1446 } else {
1447 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1448 ql_phy_init_ex(qdev, qdev->mac_index);
1449 }
1450}
1451
1452/*
1453 * MII_Setup needs to be called before taking the PHY out of reset so that the
1454 * management interface clock speed can be set properly. It would be better if
1455 * we had a way to disable MDC until after the PHY is out of reset, but we
1456 * don't have that capability.
1457 */
1458static int ql_mii_setup(struct ql3_adapter *qdev)
1459{
1460 u32 reg;
1461 struct ql3xxx_port_registers __iomem *port_regs =
1462 qdev->mem_map_registers;
1463
1464 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1465 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1466 2) << 7))
1467 return -1;
1468
1469 /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1470 reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1471
1472 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1473 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1474
1475 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1476 return 0;
1477}
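/*
 * Note for context: with the 125 MHz clock named in the comment above,
 * the DIV28 selection gives an MDC of roughly 125 / 28 ~= 4.46 MHz.
 * The "PHY timing requirements" presumably refer to this PHY's
 * supported MDC range (an assumption); for comparison, the generic
 * IEEE 802.3 clause 22 figure for MDC is 2.5 MHz.
 */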
1478
1479static u32 ql_supported_modes(struct ql3_adapter *qdev)
1480{
1481 u32 supported;
1482
1483 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1484 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1485 | SUPPORTED_Autoneg;
1486 } else {
1487 supported = SUPPORTED_10baseT_Half
1488 | SUPPORTED_10baseT_Full
1489 | SUPPORTED_100baseT_Half
1490 | SUPPORTED_100baseT_Full
1491 | SUPPORTED_1000baseT_Half
1492 | SUPPORTED_1000baseT_Full
1493 | SUPPORTED_Autoneg | SUPPORTED_TP;
1494 }
1495
1496 return supported;
1497}
1498
1499static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1500{
1501 int status;
1502 unsigned long hw_flags;
1503 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1504 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1505 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1506 2) << 7)) {
1507 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; }
1508 status = ql_is_auto_cfg(qdev);
1509 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1510 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1511 return status;
1512}
1513
1514static u32 ql_get_speed(struct ql3_adapter *qdev)
1515{
1516 u32 status;
1517 unsigned long hw_flags;
1518 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1519 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1520 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1521 2) << 7)) {
1522 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; }
1523 status = ql_get_link_speed(qdev);
1524 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1525 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1526 return status;
1527}
1528
1529static int ql_get_full_dup(struct ql3_adapter *qdev)
1530{
1531 int status;
1532 unsigned long hw_flags;
1533 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1534 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1535 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1536 2) << 7)) {
1537 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); return 0; }
1538 status = ql_is_link_full_dup(qdev);
1539 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1540 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1541 return status;
1542}
1543
1544
1545static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1546{
1547 struct ql3_adapter *qdev = netdev_priv(ndev);
1548
1549 ecmd->transceiver = XCVR_INTERNAL;
1550 ecmd->supported = ql_supported_modes(qdev);
1551
1552 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1553 ecmd->port = PORT_FIBRE;
1554 } else {
1555 ecmd->port = PORT_TP;
1556 ecmd->phy_address = qdev->PHYAddr;
1557 }
1558 ecmd->advertising = ql_supported_modes(qdev);
1559 ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1560 ecmd->speed = ql_get_speed(qdev);
1561 ecmd->duplex = ql_get_full_dup(qdev);
1562 return 0;
1563}
1564
1565static void ql_get_drvinfo(struct net_device *ndev,
1566 struct ethtool_drvinfo *drvinfo)
1567{
1568 struct ql3_adapter *qdev = netdev_priv(ndev);
1569 strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
1570 strncpy(drvinfo->version, ql3xxx_driver_version, 32);
1571 strncpy(drvinfo->fw_version, "N/A", 32);
1572 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
1573 drvinfo->n_stats = 0;
1574 drvinfo->testinfo_len = 0;
1575 drvinfo->regdump_len = 0;
1576 drvinfo->eedump_len = 0;
1577}
1578
1579static u32 ql_get_msglevel(struct net_device *ndev)
1580{
1581 struct ql3_adapter *qdev = netdev_priv(ndev);
1582 return qdev->msg_enable;
1583}
1584
1585static void ql_set_msglevel(struct net_device *ndev, u32 value)
1586{
1587 struct ql3_adapter *qdev = netdev_priv(ndev);
1588 qdev->msg_enable = value;
1589}
1590
1591static struct ethtool_ops ql3xxx_ethtool_ops = {
1592 .get_settings = ql_get_settings,
1593 .get_drvinfo = ql_get_drvinfo,
1594 .get_perm_addr = ethtool_op_get_perm_addr,
1595 .get_link = ethtool_op_get_link,
1596 .get_msglevel = ql_get_msglevel,
1597 .set_msglevel = ql_set_msglevel,
1598};
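/*
 * Note for context: with these ethtool_ops wired up, standard
 * userspace tooling works against the driver, e.g. "ethtool ethX"
 * reports the speed/duplex derived from the PHY reads above, and
 * "ethtool -s ethX msglvl N" feeds ql_set_msglevel().
 */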
1599
1600static int ql_populate_free_queue(struct ql3_adapter *qdev)
1601{
1602 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1603 u64 map;
1604
1605 while (lrg_buf_cb) {
1606 if (!lrg_buf_cb->skb) {
1607 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
1608 if (unlikely(!lrg_buf_cb->skb)) {
1609 printk(KERN_DEBUG PFX
1610 "%s: Failed dev_alloc_skb().\n",
1611 qdev->ndev->name);
1612 break;
1613 } else {
1614 /*
1615 * We save some space to copy the ethhdr from
1616 * first buffer
1617 */
1618 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1619 map = pci_map_single(qdev->pdev,
1620 lrg_buf_cb->skb->data,
1621 qdev->lrg_buffer_len -
1622 QL_HEADER_SPACE,
1623 PCI_DMA_FROMDEVICE);
1624 lrg_buf_cb->buf_phy_addr_low =
1625 cpu_to_le32(LS_64BITS(map));
1626 lrg_buf_cb->buf_phy_addr_high =
1627 cpu_to_le32(MS_64BITS(map));
1628 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1629 pci_unmap_len_set(lrg_buf_cb, maplen,
1630 qdev->lrg_buffer_len -
1631 QL_HEADER_SPACE);
1632 --qdev->lrg_buf_skb_check;
1633 if (!qdev->lrg_buf_skb_check)
1634 return 1;
1635 }
1636 }
1637 lrg_buf_cb = lrg_buf_cb->next;
1638 }
1639 return 0;
1640}
1641
1642/*
1643 * Caller holds hw_lock.
1644 */
1645static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1646{
1647 struct bufq_addr_element *lrg_buf_q_ele;
1648 int i;
1649 struct ql_rcv_buf_cb *lrg_buf_cb;
1650 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1651
1652 if ((qdev->lrg_buf_free_count >= 8)
1653 && (qdev->lrg_buf_release_cnt >= 16)) {
1654
1655 if (qdev->lrg_buf_skb_check)
1656 if (!ql_populate_free_queue(qdev))
1657 return;
1658
1659 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1660
1661 while ((qdev->lrg_buf_release_cnt >= 16)
1662 && (qdev->lrg_buf_free_count >= 8)) {
1663
1664 for (i = 0; i < 8; i++) {
1665 lrg_buf_cb =
1666 ql_get_from_lrg_buf_free_list(qdev);
1667 lrg_buf_q_ele->addr_high =
1668 lrg_buf_cb->buf_phy_addr_high;
1669 lrg_buf_q_ele->addr_low =
1670 lrg_buf_cb->buf_phy_addr_low;
1671 lrg_buf_q_ele++;
1672
1673 qdev->lrg_buf_release_cnt--;
1674 }
1675
1676 qdev->lrg_buf_q_producer_index++;
1677
1678 if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
1679 qdev->lrg_buf_q_producer_index = 0;
1680
1681 if (qdev->lrg_buf_q_producer_index ==
1682 (NUM_LBUFQ_ENTRIES - 1)) {
1683 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1684 }
1685 }
1686
1687 qdev->lrg_buf_next_free = lrg_buf_q_ele;
1688
1689 ql_write_common_reg(qdev,
1690 (u32 *) & port_regs->CommonRegs.
1691 rxLargeQProducerIndex,
1692 qdev->lrg_buf_q_producer_index);
1693 }
1694}
1695
1696static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1697 struct ob_mac_iocb_rsp *mac_rsp)
1698{
1699 struct ql_tx_buf_cb *tx_cb;
1700
1701 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1702 pci_unmap_single(qdev->pdev,
1703 pci_unmap_addr(tx_cb, mapaddr),
1704 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
1705 dev_kfree_skb_irq(tx_cb->skb);
1706 qdev->stats.tx_packets++;
1707 qdev->stats.tx_bytes += tx_cb->skb->len;
1708 tx_cb->skb = NULL;
1709 atomic_inc(&qdev->tx_count);
1710}
1711
1712static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1713 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1714{
1715 long int offset;
1716 u32 lrg_buf_phy_addr_low = 0;
1717 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1718 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1719 u32 *curr_ial_ptr;
1720 struct sk_buff *skb;
1721 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
1722
1723 /*
1724 * Get the inbound address list (small buffer).
1725 */
1726 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
1727 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1728 qdev->small_buf_index = 0;
1729
1730 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1731 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1732 qdev->small_buf_release_cnt++;
1733
1734 /* start of first buffer */
1735 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1736 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1737 qdev->lrg_buf_release_cnt++;
1738 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1739 qdev->lrg_buf_index = 0;
1740 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1741 curr_ial_ptr++;
1742
1743 /* start of second buffer */
1744 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1745 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1746
1747 /*
1748 * Second buffer gets sent up the stack.
1749 */
1750 qdev->lrg_buf_release_cnt++;
1751 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1752 qdev->lrg_buf_index = 0;
1753 skb = lrg_buf_cb2->skb;
1754
1755 qdev->stats.rx_packets++;
1756 qdev->stats.rx_bytes += length;
1757
1758 skb_put(skb, length);
1759 pci_unmap_single(qdev->pdev,
1760 pci_unmap_addr(lrg_buf_cb2, mapaddr),
1761 pci_unmap_len(lrg_buf_cb2, maplen),
1762 PCI_DMA_FROMDEVICE);
1763 prefetch(skb->data);
1764 skb->dev = qdev->ndev;
1765 skb->ip_summed = CHECKSUM_NONE;
1766 skb->protocol = eth_type_trans(skb, qdev->ndev);
1767
1768 netif_receive_skb(skb);
1769 qdev->ndev->last_rx = jiffies;
1770 lrg_buf_cb2->skb = NULL;
1771
1772 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1773 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1774}
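/*
 * Illustrative sketch: the small-buffer "inbound address list" walked
 * above is an array of 64-bit DMA addresses split into 32-bit halves,
 * which is why curr_ial_ptr (a u32 *) is incremented twice per entry.
 * Viewed as a structure (word order assumed from the reads above):
 */
struct ial_entry {
	__le32 addr_low;	/* read via le32_to_cpu(*curr_ial_ptr) */
	__le32 addr_high;	/* skipped: the second curr_ial_ptr++ */
};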
1775
1776static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1777 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
1778{
1779 long int offset;
1780 u32 lrg_buf_phy_addr_low = 0;
1781 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1782 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1783 u32 *curr_ial_ptr;
1784 struct sk_buff *skb1, *skb2;
1785 struct net_device *ndev = qdev->ndev;
1786 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
1787 u16 size = 0;
1788
1789 /*
1790 * Get the inbound address list (small buffer).
1791 */
1792
1793 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
1794 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1795 qdev->small_buf_index = 0;
1796 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1797 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1798 qdev->small_buf_release_cnt++;
1799
1800 /* start of first buffer */
1801 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1802 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1803
1804 qdev->lrg_buf_release_cnt++;
1805 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1806 qdev->lrg_buf_index = 0;
1807 skb1 = lrg_buf_cb1->skb;
1808 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1809 curr_ial_ptr++;
1810
1811 /* start of second buffer */
1812 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1813 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1814 skb2 = lrg_buf_cb2->skb;
1815 qdev->lrg_buf_release_cnt++;
1816 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1817 qdev->lrg_buf_index = 0;
1818
1819 qdev->stats.rx_packets++;
1820 qdev->stats.rx_bytes += length;
1821
1822 /*
1823 * Copy the ethhdr from first buffer to second. This
1824 * is necessary for IP completions.
1825 */
1826 if (*((u16 *) skb1->data) != 0xFFFF)
1827 size = VLAN_ETH_HLEN;
1828 else
1829 size = ETH_HLEN;
1830
1831 skb_put(skb2, length); /* Just the second buffer length here. */
1832 pci_unmap_single(qdev->pdev,
1833 pci_unmap_addr(lrg_buf_cb2, mapaddr),
1834 pci_unmap_len(lrg_buf_cb2, maplen),
1835 PCI_DMA_FROMDEVICE);
1836 prefetch(skb2->data);
1837
1838 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1839 skb2->dev = qdev->ndev;
1840 skb2->ip_summed = CHECKSUM_NONE;
1841 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
1842
1843 netif_receive_skb(skb2);
1844 ndev->last_rx = jiffies;
1845 lrg_buf_cb2->skb = NULL;
1846
1847 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1848 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1849}
1850
1851static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1852 int *tx_cleaned, int *rx_cleaned, int work_to_do)
1853{
1854 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1855 struct net_rsp_iocb *net_rsp;
1856 struct net_device *ndev = qdev->ndev;
1857 unsigned long hw_flags;
1858
1859 /* While there are entries in the completion queue. */
1860	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
1861 qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
1862
1863 net_rsp = qdev->rsp_current;
1864 switch (net_rsp->opcode) {
1865
1866 case OPCODE_OB_MAC_IOCB_FN0:
1867 case OPCODE_OB_MAC_IOCB_FN2:
1868 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
1869 net_rsp);
1870 (*tx_cleaned)++;
1871 break;
1872
1873 case OPCODE_IB_MAC_IOCB:
1874 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
1875 net_rsp);
1876 (*rx_cleaned)++;
1877 break;
1878
1879 case OPCODE_IB_IP_IOCB:
1880 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
1881 net_rsp);
1882 (*rx_cleaned)++;
1883 break;
1884 default:
1885 {
1886 u32 *tmp = (u32 *) net_rsp;
1887				printk(KERN_ERR PFX
1888				       "%s: Unhandled response opcode 0x%x; "
1889				       "dropping the packet.\n",
1890				       ndev->name, net_rsp->opcode);
1893				printk(KERN_ERR PFX
1894				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
1895 (unsigned long int)tmp[0],
1896 (unsigned long int)tmp[1],
1897 (unsigned long int)tmp[2],
1898 (unsigned long int)tmp[3]);
1899 }
1900 }
1901
1902 qdev->rsp_consumer_index++;
1903
1904 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
1905 qdev->rsp_consumer_index = 0;
1906 qdev->rsp_current = qdev->rsp_q_virt_addr;
1907 } else {
1908 qdev->rsp_current++;
1909 }
1910 }
1911
1912 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1913
1914 ql_update_lrg_bufq_prod_index(qdev);
1915
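	/*
	 * Each small-buffer queue entry holds eight buffer addresses
	 * (matching the "-= 8" below), so the producer index advances
	 * one entry for every eight released buffers.  Waiting for at
	 * least 16 releases batches the producer-index doorbell write.
	 */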
1916 if (qdev->small_buf_release_cnt >= 16) {
1917 while (qdev->small_buf_release_cnt >= 16) {
1918 qdev->small_buf_q_producer_index++;
1919
1920 if (qdev->small_buf_q_producer_index ==
1921 NUM_SBUFQ_ENTRIES)
1922 qdev->small_buf_q_producer_index = 0;
1923 qdev->small_buf_release_cnt -= 8;
1924 }
1925
1926 ql_write_common_reg(qdev,
1927 (u32 *) & port_regs->CommonRegs.
1928 rxSmallQProducerIndex,
1929 qdev->small_buf_q_producer_index);
1930 }
1931
1932 ql_write_common_reg(qdev,
1933 (u32 *) & port_regs->CommonRegs.rspQConsumerIndex,
1934 qdev->rsp_consumer_index);
1935 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1936
1937	if (unlikely(netif_queue_stopped(qdev->ndev)) &&
1938	    (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
1939		netif_wake_queue(qdev->ndev);
1942
1943 return *tx_cleaned + *rx_cleaned;
1944}
1945
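/*
 * NAPI poll entry point (pre-2.6.24 interface): *budget and
 * ndev->quota are decremented by the amount of receive work done.
 * Returning 1 asks to be polled again; returning 0 (after
 * netif_rx_complete()) hands completion processing back to the
 * interrupt, which is re-enabled here.
 */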
1946static int ql_poll(struct net_device *ndev, int *budget)
1947{
1948 struct ql3_adapter *qdev = netdev_priv(ndev);
1949 int work_to_do = min(*budget, ndev->quota);
1950 int rx_cleaned = 0, tx_cleaned = 0;
1951
1952 if (!netif_carrier_ok(ndev))
1953 goto quit_polling;
1954
1955 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
1956 *budget -= rx_cleaned;
1957 ndev->quota -= rx_cleaned;
1958
1959 if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
1960quit_polling:
1961 netif_rx_complete(ndev);
1962 ql_enable_interrupts(qdev);
1963 return 0;
1964 }
1965 return 1;
1966}
1967
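/*
 * Interrupt handler.  Fatal-error and soft-reset conditions are
 * deferred to the reset worker; ordinary completion interrupts are
 * masked and handed off to NAPI via netif_rx_schedule().
 */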
1968static irqreturn_t ql3xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
1969{
1971	struct net_device *ndev = dev_id;
1972	struct ql3_adapter *qdev = netdev_priv(ndev);
1973	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1974	u32 value;
1975	int handled = 1;
1976	u32 var;
1977
1979
1980 value =
1981 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
1982
1983 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
1984 spin_lock(&qdev->adapter_lock);
1985 netif_stop_queue(qdev->ndev);
1986 netif_carrier_off(qdev->ndev);
1987 ql_disable_interrupts(qdev);
1988 qdev->port_link_state = LS_DOWN;
1989		set_bit(QL_RESET_ACTIVE, &qdev->flags);
1990
1991 if (value & ISP_CONTROL_FE) {
1992 /*
1993 * Chip Fatal Error.
1994 */
1995 var =
1996 ql_read_page0_reg_l(qdev,
1997 &port_regs->PortFatalErrStatus);
1998 printk(KERN_WARNING PFX
1999 "%s: Resetting chip. PortFatalErrStatus "
2000 "register = 0x%x\n", ndev->name, var);
2001			set_bit(QL_RESET_START, &qdev->flags);
2002 } else {
2003 /*
2004 * Soft Reset Requested.
2005 */
2006			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2007 printk(KERN_ERR PFX
2008 "%s: Another function issued a reset to the "
2009 "chip. ISR value = %x.\n", ndev->name, value);
2010 }
2011 queue_work(qdev->workqueue, &qdev->reset_work);
2012 spin_unlock(&qdev->adapter_lock);
2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2014 ql_disable_interrupts(qdev);
2015 if (likely(netif_rx_schedule_prep(ndev)))
2016 __netif_rx_schedule(ndev);
2017 else
2018 ql_enable_interrupts(qdev);
2019 } else {
2020 return IRQ_NONE;
2021 }
2022
2023 return IRQ_RETVAL(handled);
2024}
2025
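/*
 * Transmit entry point.  The skb is mapped as a single DMA fragment
 * into one outbound MAC IOCB; OB_MAC_IOCB_REQ_E in buf_0_len marks it
 * as the only buffer in the list, and the request-queue producer
 * index doorbell hands it to the chip.
 */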
2026static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2027{
2028 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2029 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2030 struct ql_tx_buf_cb *tx_cb;
2031 struct ob_mac_iocb_req *mac_iocb_ptr;
2032 u64 map;
2033
2034 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2035 if (!netif_queue_stopped(ndev))
2036 netif_stop_queue(ndev);
2037 return NETDEV_TX_BUSY;
2038 }
2039	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
2040 mac_iocb_ptr = tx_cb->queue_entry;
2041 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2042 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2043 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2044 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2045 mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
2046 tx_cb->skb = skb;
2047 map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
2048 mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
2049 mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
2050 mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
2051 pci_unmap_addr_set(tx_cb, mapaddr, map);
2052 pci_unmap_len_set(tx_cb, maplen, skb->len);
2053 atomic_dec(&qdev->tx_count);
2054
2055 qdev->req_producer_index++;
2056 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2057 qdev->req_producer_index = 0;
2058 wmb();
2059 ql_write_common_reg_l(qdev,
2060 (u32 *) & port_regs->CommonRegs.reqQProducerIndex,
2061 qdev->req_producer_index);
2062
2063 ndev->trans_start = jiffies;
2064 if (netif_msg_tx_queued(qdev))
2065 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2066 ndev->name, qdev->req_producer_index, skb->len);
2067
2068 return NETDEV_TX_OK;
2069}
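
/*
 * The request and response rings apparently must be naturally aligned:
 * the checks below reject an allocation whose bus address is not a
 * multiple of the ring size.  pci_alloc_consistent() normally returns
 * memory aligned well beyond this, so the checks act as a safety net.
 */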
2070static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2071{
2072 qdev->req_q_size =
2073 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2074
2075 qdev->req_q_virt_addr =
2076 pci_alloc_consistent(qdev->pdev,
2077 (size_t) qdev->req_q_size,
2078 &qdev->req_q_phy_addr);
2079
2080	if ((qdev->req_q_virt_addr == NULL) ||
2081	    (LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1))) {
2082		printk(KERN_ERR PFX "%s: reqQ allocation failed.\n",
2083 qdev->ndev->name);
2084 return -ENOMEM;
2085 }
2086
2087 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2088
2089 qdev->rsp_q_virt_addr =
2090 pci_alloc_consistent(qdev->pdev,
2091 (size_t) qdev->rsp_q_size,
2092 &qdev->rsp_q_phy_addr);
2093
2094	if ((qdev->rsp_q_virt_addr == NULL) ||
2095	    (LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1))) {
2096 printk(KERN_ERR PFX
2097 "%s: rspQ allocation failed\n",
2098 qdev->ndev->name);
2099 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2100 qdev->req_q_virt_addr,
2101 qdev->req_q_phy_addr);
2102 return -ENOMEM;
2103 }
2104
2105 set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2106
2107 return 0;
2108}
2109
2110static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2111{
2112 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
2113 printk(KERN_INFO PFX
2114 "%s: Already done.\n", qdev->ndev->name);
2115 return;
2116 }
2117
2118 pci_free_consistent(qdev->pdev,
2119 qdev->req_q_size,
2120 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2121
2122 qdev->req_q_virt_addr = NULL;
2123
2124 pci_free_consistent(qdev->pdev,
2125 qdev->rsp_q_size,
2126 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2127
2128 qdev->rsp_q_virt_addr = NULL;
2129
2130 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2131}
2132
2133static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2134{
2135 /* Create Large Buffer Queue */
2136 qdev->lrg_buf_q_size =
2137 NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2138 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2139 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2140 else
2141 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2142
2143 qdev->lrg_buf_q_alloc_virt_addr =
2144 pci_alloc_consistent(qdev->pdev,
2145 qdev->lrg_buf_q_alloc_size,
2146 &qdev->lrg_buf_q_alloc_phy_addr);
2147
2148 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2149 printk(KERN_ERR PFX
2150 "%s: lBufQ failed\n", qdev->ndev->name);
2151 return -ENOMEM;
2152 }
2153 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2154 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2155
2156 /* Create Small Buffer Queue */
2157 qdev->small_buf_q_size =
2158 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2159 if (qdev->small_buf_q_size < PAGE_SIZE)
2160 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2161 else
2162 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2163
2164 qdev->small_buf_q_alloc_virt_addr =
2165 pci_alloc_consistent(qdev->pdev,
2166 qdev->small_buf_q_alloc_size,
2167 &qdev->small_buf_q_alloc_phy_addr);
2168
2169 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2170 printk(KERN_ERR PFX
2171 "%s: Small Buffer Queue allocation failed.\n",
2172 qdev->ndev->name);
2173 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2174 qdev->lrg_buf_q_alloc_virt_addr,
2175 qdev->lrg_buf_q_alloc_phy_addr);
2176 return -ENOMEM;
2177 }
2178
2179 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2180 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2181 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2182 return 0;
2183}
2184
2185static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2186{
2187 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2188 printk(KERN_INFO PFX
2189 "%s: Already done.\n", qdev->ndev->name);
2190 return;
2191 }
2192
2193 pci_free_consistent(qdev->pdev,
2194 qdev->lrg_buf_q_alloc_size,
2195 qdev->lrg_buf_q_alloc_virt_addr,
2196 qdev->lrg_buf_q_alloc_phy_addr);
2197
2198 qdev->lrg_buf_q_virt_addr = NULL;
2199
2200 pci_free_consistent(qdev->pdev,
2201 qdev->small_buf_q_alloc_size,
2202 qdev->small_buf_q_alloc_virt_addr,
2203 qdev->small_buf_q_alloc_phy_addr);
2204
2205 qdev->small_buf_q_virt_addr = NULL;
2206
2207 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2208}
2209
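/*
 * Small receive buffers come from one contiguous coherent allocation
 * carved into QL_SMALL_BUFFER_SIZE chunks; the small-buffer queue
 * entries are seeded below to point at successive chunks, so the chip
 * can fill them without further driver involvement.
 */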
2210static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2211{
2212 int i;
2213 struct bufq_addr_element *small_buf_q_entry;
2214
2215	/* One chunk of memory is allocated and carved up for all of the small buffers. */
2216 qdev->small_buf_total_size =
2217 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2218 QL_SMALL_BUFFER_SIZE);
2219
2220 qdev->small_buf_virt_addr =
2221 pci_alloc_consistent(qdev->pdev,
2222 qdev->small_buf_total_size,
2223 &qdev->small_buf_phy_addr);
2224
2225 if (qdev->small_buf_virt_addr == NULL) {
2226 printk(KERN_ERR PFX
2227 "%s: Failed to get small buffer memory.\n",
2228 qdev->ndev->name);
2229 return -ENOMEM;
2230 }
2231
2232 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2233 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2234
2235 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2236
2237 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
2238
2239 /* Initialize the small buffer queue. */
2240 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2241 small_buf_q_entry->addr_high =
2242 cpu_to_le32(qdev->small_buf_phy_addr_high);
2243 small_buf_q_entry->addr_low =
2244 cpu_to_le32(qdev->small_buf_phy_addr_low +
2245 (i * QL_SMALL_BUFFER_SIZE));
2246 small_buf_q_entry++;
2247 }
2248 qdev->small_buf_index = 0;
2249 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
2250 return 0;
2251}
2252
2253static void ql_free_small_buffers(struct ql3_adapter *qdev)
2254{
2255 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2256 printk(KERN_INFO PFX
2257 "%s: Already done.\n", qdev->ndev->name);
2258 return;
2259 }
2260 if (qdev->small_buf_virt_addr != NULL) {
2261 pci_free_consistent(qdev->pdev,
2262 qdev->small_buf_total_size,
2263 qdev->small_buf_virt_addr,
2264 qdev->small_buf_phy_addr);
2265
2266 qdev->small_buf_virt_addr = NULL;
2267 }
2268}
2269
2270static void ql_free_large_buffers(struct ql3_adapter *qdev)
2271{
2272 int i = 0;
2273 struct ql_rcv_buf_cb *lrg_buf_cb;
2274
2275 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2276 lrg_buf_cb = &qdev->lrg_buf[i];
2277 if (lrg_buf_cb->skb) {
2278 dev_kfree_skb(lrg_buf_cb->skb);
2279 pci_unmap_single(qdev->pdev,
2280 pci_unmap_addr(lrg_buf_cb, mapaddr),
2281 pci_unmap_len(lrg_buf_cb, maplen),
2282 PCI_DMA_FROMDEVICE);
2283 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2284 } else {
2285 break;
2286 }
2287 }
2288}
2289
2290static void ql_init_large_buffers(struct ql3_adapter *qdev)
2291{
2292 int i;
2293 struct ql_rcv_buf_cb *lrg_buf_cb;
2294 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2295
2296 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2297 lrg_buf_cb = &qdev->lrg_buf[i];
2298 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2299 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2300 buf_addr_ele++;
2301 }
2302 qdev->lrg_buf_index = 0;
2303 qdev->lrg_buf_skb_check = 0;
2304}
2305
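/*
 * Large receive buffers are individual skbs, one per queue slot.
 * QL_HEADER_SPACE is reserved at the head of each so that, for IP
 * completions, the ethernet header from the first buffer can later be
 * pushed in front of the payload (see ql_process_macip_rx_intr()).
 */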
2306static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2307{
2308 int i;
2309 struct ql_rcv_buf_cb *lrg_buf_cb;
2310 struct sk_buff *skb;
2311 u64 map;
2312
2313 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2314 skb = dev_alloc_skb(qdev->lrg_buffer_len);
2315 if (unlikely(!skb)) {
2316 /* Better luck next round */
2317			printk(KERN_ERR PFX
2318			       "%s: large buffer allocation failed "
2319			       "for %d bytes at index %d.\n",
2320			       qdev->ndev->name,
2321			       qdev->lrg_buffer_len, i);
2322 ql_free_large_buffers(qdev);
2323 return -ENOMEM;
2324		} else {
2326 lrg_buf_cb = &qdev->lrg_buf[i];
2327 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2328 lrg_buf_cb->index = i;
2329 lrg_buf_cb->skb = skb;
2330 /*
2331 * We save some space to copy the ethhdr from first
2332 * buffer
2333 */
2334 skb_reserve(skb, QL_HEADER_SPACE);
2335 map = pci_map_single(qdev->pdev,
2336 skb->data,
2337 qdev->lrg_buffer_len -
2338 QL_HEADER_SPACE,
2339 PCI_DMA_FROMDEVICE);
2340 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2341 pci_unmap_len_set(lrg_buf_cb, maplen,
2342 qdev->lrg_buffer_len -
2343 QL_HEADER_SPACE);
2344 lrg_buf_cb->buf_phy_addr_low =
2345 cpu_to_le32(LS_64BITS(map));
2346 lrg_buf_cb->buf_phy_addr_high =
2347 cpu_to_le32(MS_64BITS(map));
2348 }
2349 }
2350 return 0;
2351}
2352
2353static void ql_create_send_free_list(struct ql3_adapter *qdev)
2354{
2355 struct ql_tx_buf_cb *tx_cb;
2356 int i;
2357 struct ob_mac_iocb_req *req_q_curr =
2358 qdev->req_q_virt_addr;
2359
2360 /* Create free list of transmit buffers */
2361 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2362 tx_cb = &qdev->tx_buf[i];
2363 tx_cb->skb = NULL;
2364 tx_cb->queue_entry = req_q_curr;
2365 req_q_curr++;
2366 }
2367}
2368
2369static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2370{
2371 if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
2372 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2373 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2374 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2375 } else {
2376 printk(KERN_ERR PFX
2377 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
2378 qdev->ndev->name);
2379 return -ENOMEM;
2380 }
2381 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2382 qdev->max_frame_size =
2383 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2384
2385 /*
2386 * First allocate a page of shared memory and use it for shadow
2387 * locations of Network Request Queue Consumer Address Register and
2388 * Network Completion Queue Producer Index Register
2389 */
2390 qdev->shadow_reg_virt_addr =
2391 pci_alloc_consistent(qdev->pdev,
2392 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2393
2394 if (qdev->shadow_reg_virt_addr != NULL) {
2395 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2396 qdev->req_consumer_index_phy_addr_high =
2397 MS_64BITS(qdev->shadow_reg_phy_addr);
2398 qdev->req_consumer_index_phy_addr_low =
2399 LS_64BITS(qdev->shadow_reg_phy_addr);
2400
2401 qdev->prsp_producer_index =
2402 (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2403 qdev->rsp_producer_index_phy_addr_high =
2404 qdev->req_consumer_index_phy_addr_high;
2405 qdev->rsp_producer_index_phy_addr_low =
2406 qdev->req_consumer_index_phy_addr_low + 8;
2407 } else {
2408 printk(KERN_ERR PFX
2409 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
2410 return -ENOMEM;
2411 }
2412
2413 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2414 printk(KERN_ERR PFX
2415 "%s: ql_alloc_net_req_rsp_queues failed.\n",
2416 qdev->ndev->name);
2417 goto err_req_rsp;
2418 }
2419
2420 if (ql_alloc_buffer_queues(qdev) != 0) {
2421 printk(KERN_ERR PFX
2422 "%s: ql_alloc_buffer_queues failed.\n",
2423 qdev->ndev->name);
2424 goto err_buffer_queues;
2425 }
2426
2427 if (ql_alloc_small_buffers(qdev) != 0) {
2428 printk(KERN_ERR PFX
2429 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
2430 goto err_small_buffers;
2431 }
2432
2433 if (ql_alloc_large_buffers(qdev) != 0) {
2434 printk(KERN_ERR PFX
2435 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
2436 goto err_small_buffers;
2437 }
2438
2439 /* Initialize the large buffer queue. */
2440 ql_init_large_buffers(qdev);
2441 ql_create_send_free_list(qdev);
2442
2443 qdev->rsp_current = qdev->rsp_q_virt_addr;
2444
2445 return 0;
2446
2447err_small_buffers:
2448 ql_free_buffer_queues(qdev);
2449err_buffer_queues:
2450 ql_free_net_req_rsp_queues(qdev);
2451err_req_rsp:
2452 pci_free_consistent(qdev->pdev,
2453 PAGE_SIZE,
2454 qdev->shadow_reg_virt_addr,
2455 qdev->shadow_reg_phy_addr);
2456
2457 return -ENOMEM;
2458}
2459
2460static void ql_free_mem_resources(struct ql3_adapter *qdev)
2461{
2462 ql_free_large_buffers(qdev);
2463 ql_free_small_buffers(qdev);
2464 ql_free_buffer_queues(qdev);
2465 ql_free_net_req_rsp_queues(qdev);
2466 if (qdev->shadow_reg_virt_addr != NULL) {
2467 pci_free_consistent(qdev->pdev,
2468 PAGE_SIZE,
2469 qdev->shadow_reg_virt_addr,
2470 qdev->shadow_reg_phy_addr);
2471 qdev->shadow_reg_virt_addr = NULL;
2472 }
2473}
2474
2475static int ql_init_misc_registers(struct ql3_adapter *qdev)
2476{
2477 struct ql3xxx_local_ram_registers *local_ram =
2478 (struct ql3xxx_local_ram_registers *)qdev->mem_map_registers;
2479
2480 if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2481 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2482 2) << 4))
2483 return -1;
2484
2485 ql_write_page2_reg(qdev,
2486 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2487
2488 ql_write_page2_reg(qdev,
2489 &local_ram->maxBufletCount,
2490 qdev->nvram_data.bufletCount);
2491
2492 ql_write_page2_reg(qdev,
2493 &local_ram->freeBufletThresholdLow,
2494 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2495 (qdev->nvram_data.tcpWindowThreshold0));
2496
2497 ql_write_page2_reg(qdev,
2498 &local_ram->freeBufletThresholdHigh,
2499 qdev->nvram_data.tcpWindowThreshold50);
2500
2501 ql_write_page2_reg(qdev,
2502 &local_ram->ipHashTableBase,
2503 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2504 qdev->nvram_data.ipHashTableBaseLo);
2505 ql_write_page2_reg(qdev,
2506 &local_ram->ipHashTableCount,
2507 qdev->nvram_data.ipHashTableSize);
2508 ql_write_page2_reg(qdev,
2509 &local_ram->tcpHashTableBase,
2510 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2511 qdev->nvram_data.tcpHashTableBaseLo);
2512 ql_write_page2_reg(qdev,
2513 &local_ram->tcpHashTableCount,
2514 qdev->nvram_data.tcpHashTableSize);
2515 ql_write_page2_reg(qdev,
2516 &local_ram->ncbBase,
2517 (qdev->nvram_data.ncbTableBaseHi << 16) |
2518 qdev->nvram_data.ncbTableBaseLo);
2519 ql_write_page2_reg(qdev,
2520 &local_ram->maxNcbCount,
2521 qdev->nvram_data.ncbTableSize);
2522 ql_write_page2_reg(qdev,
2523 &local_ram->drbBase,
2524 (qdev->nvram_data.drbTableBaseHi << 16) |
2525 qdev->nvram_data.drbTableBaseLo);
2526 ql_write_page2_reg(qdev,
2527 &local_ram->maxDrbCount,
2528 qdev->nvram_data.drbTableSize);
2529 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
2530 return 0;
2531}
2532
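/*
 * Bring the adapter to an operational state: take the PHY out of
 * reset, program the shadow-register addresses and all four rings,
 * and load the MAC address.  Chip-global setup (local RAM, frame
 * length, external HW config) is skipped when PORT_STATUS_IC shows
 * the other function has already configured the chip.  Finally,
 * configuration-complete is signalled and the code waits up to
 * roughly 5 seconds for PORT_STATUS_IC to assert.
 */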
2533static int ql_adapter_initialize(struct ql3_adapter *qdev)
2534{
2535 u32 value;
2536 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2537 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
2538 (struct ql3xxx_host_memory_registers *)port_regs;
2539 u32 delay = 10;
2540 int status = 0;
2541
2542 if(ql_mii_setup(qdev))
2543 return -1;
2544
2545	/* Bring the PHY out of reset. */
2546 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2547 (ISP_SERIAL_PORT_IF_WE |
2548 (ISP_SERIAL_PORT_IF_WE << 16)));
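	/*
	 * Register-write convention on this chip: the upper 16 bits of
	 * the written value act as a write-enable mask for the
	 * corresponding lower bits, hence the recurring
	 * "(bits << 16) | bits" pattern throughout this driver.
	 */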
2549
2550 qdev->port_link_state = LS_DOWN;
2551 netif_carrier_off(qdev->ndev);
2552
2553 /* V2 chip fix for ARS-39168. */
2554 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2555 (ISP_SERIAL_PORT_IF_SDE |
2556 (ISP_SERIAL_PORT_IF_SDE << 16)));
2557
2558 /* Request Queue Registers */
2559 *((u32 *) (qdev->preq_consumer_index)) = 0;
2560 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
2561 qdev->req_producer_index = 0;
2562
2563 ql_write_page1_reg(qdev,
2564 &hmem_regs->reqConsumerIndexAddrHigh,
2565 qdev->req_consumer_index_phy_addr_high);
2566 ql_write_page1_reg(qdev,
2567 &hmem_regs->reqConsumerIndexAddrLow,
2568 qdev->req_consumer_index_phy_addr_low);
2569
2570 ql_write_page1_reg(qdev,
2571 &hmem_regs->reqBaseAddrHigh,
2572 MS_64BITS(qdev->req_q_phy_addr));
2573 ql_write_page1_reg(qdev,
2574 &hmem_regs->reqBaseAddrLow,
2575 LS_64BITS(qdev->req_q_phy_addr));
2576 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
2577
2578 /* Response Queue Registers */
2579 *((u16 *) (qdev->prsp_producer_index)) = 0;
2580 qdev->rsp_consumer_index = 0;
2581 qdev->rsp_current = qdev->rsp_q_virt_addr;
2582
2583 ql_write_page1_reg(qdev,
2584 &hmem_regs->rspProducerIndexAddrHigh,
2585 qdev->rsp_producer_index_phy_addr_high);
2586
2587 ql_write_page1_reg(qdev,
2588 &hmem_regs->rspProducerIndexAddrLow,
2589 qdev->rsp_producer_index_phy_addr_low);
2590
2591 ql_write_page1_reg(qdev,
2592 &hmem_regs->rspBaseAddrHigh,
2593 MS_64BITS(qdev->rsp_q_phy_addr));
2594
2595 ql_write_page1_reg(qdev,
2596 &hmem_regs->rspBaseAddrLow,
2597 LS_64BITS(qdev->rsp_q_phy_addr));
2598
2599 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
2600
2601 /* Large Buffer Queue */
2602 ql_write_page1_reg(qdev,
2603 &hmem_regs->rxLargeQBaseAddrHigh,
2604 MS_64BITS(qdev->lrg_buf_q_phy_addr));
2605
2606 ql_write_page1_reg(qdev,
2607 &hmem_regs->rxLargeQBaseAddrLow,
2608 LS_64BITS(qdev->lrg_buf_q_phy_addr));
2609
2610 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
2611
2612 ql_write_page1_reg(qdev,
2613 &hmem_regs->rxLargeBufferLength,
2614 qdev->lrg_buffer_len);
2615
2616 /* Small Buffer Queue */
2617 ql_write_page1_reg(qdev,
2618 &hmem_regs->rxSmallQBaseAddrHigh,
2619 MS_64BITS(qdev->small_buf_q_phy_addr));
2620
2621 ql_write_page1_reg(qdev,
2622 &hmem_regs->rxSmallQBaseAddrLow,
2623 LS_64BITS(qdev->small_buf_q_phy_addr));
2624
2625 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
2626 ql_write_page1_reg(qdev,
2627 &hmem_regs->rxSmallBufferLength,
2628 QL_SMALL_BUFFER_SIZE);
2629
2630 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
2631 qdev->small_buf_release_cnt = 8;
2632 qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
2633 qdev->lrg_buf_release_cnt = 8;
2634 qdev->lrg_buf_next_free =
2635 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
2636 qdev->small_buf_index = 0;
2637 qdev->lrg_buf_index = 0;
2638 qdev->lrg_buf_free_count = 0;
2639 qdev->lrg_buf_free_head = NULL;
2640 qdev->lrg_buf_free_tail = NULL;
2641
2642 ql_write_common_reg(qdev,
2643 (u32 *) & port_regs->CommonRegs.
2644 rxSmallQProducerIndex,
2645 qdev->small_buf_q_producer_index);
2646 ql_write_common_reg(qdev,
2647 (u32 *) & port_regs->CommonRegs.
2648 rxLargeQProducerIndex,
2649 qdev->lrg_buf_q_producer_index);
2650
2651 /*
2652 * Find out if the chip has already been initialized. If it has, then
2653 * we skip some of the initialization.
2654 */
2655 clear_bit(QL_LINK_MASTER, &qdev->flags);
2656 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
2657 if ((value & PORT_STATUS_IC) == 0) {
2658
2659 /* Chip has not been configured yet, so let it rip. */
2660 if(ql_init_misc_registers(qdev)) {
2661 status = -1;
2662 goto out;
2663 }
2664
2665 if (qdev->mac_index)
2666 ql_write_page0_reg(qdev,
2667 &port_regs->mac1MaxFrameLengthReg,
2668 qdev->max_frame_size);
2669 else
2670 ql_write_page0_reg(qdev,
2671 &port_regs->mac0MaxFrameLengthReg,
2672 qdev->max_frame_size);
2673
2674 value = qdev->nvram_data.tcpMaxWindowSize;
2675 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
2676
2677 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
2678
2679 if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
2680 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
2681 * 2) << 13)) {
2682 status = -1;
2683 goto out;
2684 }
2685 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
2686 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
2687 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
2688 16) | (INTERNAL_CHIP_SD |
2689 INTERNAL_CHIP_WE)));
2690 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
2691 }
2692
2693
2694 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
2695 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2696 2) << 7)) {
2697 status = -1;
2698 goto out;
2699 }
2700
2701 ql_init_scan_mode(qdev);
2702 ql_get_phy_owner(qdev);
2703
2704 /* Load the MAC Configuration */
2705
2706 /* Program lower 32 bits of the MAC address */
2707 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2708 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
2709 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
2710 ((qdev->ndev->dev_addr[2] << 24)
2711 | (qdev->ndev->dev_addr[3] << 16)
2712 | (qdev->ndev->dev_addr[4] << 8)
2713 | qdev->ndev->dev_addr[5]));
2714
2715 /* Program top 16 bits of the MAC address */
2716 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2717 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
2718 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
2719 ((qdev->ndev->dev_addr[0] << 8)
2720 | qdev->ndev->dev_addr[1]));
2721
2722 /* Enable Primary MAC */
2723 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2724 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
2725 MAC_ADDR_INDIRECT_PTR_REG_PE));
2726
2727 /* Clear Primary and Secondary IP addresses */
2728 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
2729 ((IP_ADDR_INDEX_REG_MASK << 16) |
2730 (qdev->mac_index << 2)));
2731 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
2732
2733 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
2734 ((IP_ADDR_INDEX_REG_MASK << 16) |
2735 ((qdev->mac_index << 2) + 1)));
2736 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
2737
2738 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
2739
2740 /* Indicate Configuration Complete */
2741 ql_write_page0_reg(qdev,
2742 &port_regs->portControl,
2743 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
2744
2745 do {
2746 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
2747 if (value & PORT_STATUS_IC)
2748 break;
2749 msleep(500);
2750 } while (--delay);
2751
2752 if (delay == 0) {
2753		printk(KERN_ERR PFX
2754		       "%s: HW initialization timed out.\n", qdev->ndev->name);
2755 status = -1;
2756 goto out;
2757 }
2758
2759 /* Enable Ethernet Function */
2760 value =
2761 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
2762 PORT_CONTROL_HH);
2763 ql_write_page0_reg(qdev, &port_regs->portControl,
2764 ((value << 16) | value));
2765
2766out:
2767 return status;
2768}
2769
2770/*
2771 * Caller holds hw_lock.
2772 */
2773static int ql_adapter_reset(struct ql3_adapter *qdev)
2774{
2775 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2776 int status = 0;
2777 u16 value;
2778 int max_wait_time;
2779
2780 set_bit(QL_RESET_ACTIVE, &qdev->flags);
2781 clear_bit(QL_RESET_DONE, &qdev->flags);
2782
2783 /*
2784 * Issue soft reset to chip.
2785 */
2786 printk(KERN_DEBUG PFX
2787 "%s: Issue soft reset to chip.\n",
2788 qdev->ndev->name);
2789 ql_write_common_reg(qdev,
2790 (u32 *) & port_regs->CommonRegs.ispControlStatus,
2791 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
2792
2793	/* Wait up to 5 seconds for the reset to complete. */
2794	printk(KERN_DEBUG PFX
2795	       "%s: Waiting up to 5 seconds for reset to complete.\n",
2796	       qdev->ndev->name);
2797
2798 /* Wait until the firmware tells us the Soft Reset is done */
2799 max_wait_time = 5;
2800 do {
2801 value =
2802 ql_read_common_reg(qdev,
2803 &port_regs->CommonRegs.ispControlStatus);
2804 if ((value & ISP_CONTROL_SR) == 0)
2805 break;
2806
2807 ssleep(1);
2808 } while ((--max_wait_time));
2809
2810 /*
2811 * Also, make sure that the Network Reset Interrupt bit has been
2812 * cleared after the soft reset has taken place.
2813 */
2814 value =
2815 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
2816 if (value & ISP_CONTROL_RI) {
2817 printk(KERN_DEBUG PFX
2818 "ql_adapter_reset: clearing RI after reset.\n");
2819 ql_write_common_reg(qdev,
2820 (u32 *) & port_regs->CommonRegs.
2821 ispControlStatus,
2822 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
2823 }
2824
2825 if (max_wait_time == 0) {
2826 /* Issue Force Soft Reset */
2827 ql_write_common_reg(qdev,
2828 (u32 *) & port_regs->CommonRegs.
2829 ispControlStatus,
2830 ((ISP_CONTROL_FSR << 16) |
2831 ISP_CONTROL_FSR));
2832 /*
2833 * Wait until the firmware tells us the Force Soft Reset is
2834 * done
2835 */
2836 max_wait_time = 5;
2837 do {
2838 value =
2839 ql_read_common_reg(qdev,
2840 &port_regs->CommonRegs.
2841 ispControlStatus);
2842 if ((value & ISP_CONTROL_FSR) == 0) {
2843 break;
2844 }
2845 ssleep(1);
2846 } while ((--max_wait_time));
2847 }
2848 if (max_wait_time == 0)
2849 status = 1;
2850
2851 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
2852 set_bit(QL_RESET_DONE, &qdev->flags);
2853 return status;
2854}
2855
2856static void ql_set_mac_info(struct ql3_adapter *qdev)
2857{
2858 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2859 u32 value, port_status;
2860 u8 func_number;
2861
2862 /* Get the function number */
2863 value =
2864 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2865 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
2866 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
2867 switch (value & ISP_CONTROL_FN_MASK) {
2868 case ISP_CONTROL_FN0_NET:
2869 qdev->mac_index = 0;
2870 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
2871 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
2872 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
2873 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
2874 qdev->PHYAddr = PORT0_PHY_ADDRESS;
2875 if (port_status & PORT_STATUS_SM0)
2876 set_bit(QL_LINK_OPTICAL,&qdev->flags);
2877 else
2878 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
2879 break;
2880
2881 case ISP_CONTROL_FN1_NET:
2882 qdev->mac_index = 1;
2883 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
2884 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
2885 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
2886 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
2887 qdev->PHYAddr = PORT1_PHY_ADDRESS;
2888 if (port_status & PORT_STATUS_SM1)
2889 set_bit(QL_LINK_OPTICAL,&qdev->flags);
2890 else
2891 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
2892 break;
2893
2894 case ISP_CONTROL_FN0_SCSI:
2895 case ISP_CONTROL_FN1_SCSI:
2896 default:
2897		printk(KERN_DEBUG PFX
2898		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
2899		       qdev->ndev->name, value);
2901 }
2902 qdev->numPorts = qdev->nvram_data.numPorts;
2903}
2904
2905static void ql_display_dev_info(struct net_device *ndev)
2906{
2907 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2908 struct pci_dev *pdev = qdev->pdev;
2909
2910 printk(KERN_INFO PFX
2911 "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
2912 DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
2913 printk(KERN_INFO PFX
2914 "%s Interface.\n",
2915 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
2916
2917 /*
2918 * Print PCI bus width/type.
2919 */
2920 printk(KERN_INFO PFX
2921 "Bus interface is %s %s.\n",
2922 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
2923 ((qdev->pci_x) ? "PCI-X" : "PCI"));
2924
2925	printk(KERN_INFO PFX
2926	       "Memory-mapped I/O base address = 0x%p\n",
2927	       qdev->mem_map_registers);
2928 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
2929
2930 if (netif_msg_probe(qdev))
2931 printk(KERN_INFO PFX
2932 "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
2933 ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
2934 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
2935 ndev->dev_addr[5]);
2936}
2937
2938static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
2939{
2940 struct net_device *ndev = qdev->ndev;
2941 int retval = 0;
2942
2943 netif_stop_queue(ndev);
2944 netif_carrier_off(ndev);
2945
2946 clear_bit(QL_ADAPTER_UP,&qdev->flags);
2947 clear_bit(QL_LINK_MASTER,&qdev->flags);
2948
2949 ql_disable_interrupts(qdev);
2950
2951 free_irq(qdev->pdev->irq, ndev);
2952
2953 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
2954 printk(KERN_INFO PFX
2955 "%s: calling pci_disable_msi().\n", qdev->ndev->name);
2956 clear_bit(QL_MSI_ENABLED,&qdev->flags);
2957 pci_disable_msi(qdev->pdev);
2958 }
2959
2960 del_timer_sync(&qdev->adapter_timer);
2961
2962 netif_poll_disable(ndev);
2963
2964 if (do_reset) {
2965 int soft_reset;
2966 unsigned long hw_flags;
2967
2968 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2969 if (ql_wait_for_drvr_lock(qdev)) {
2970 if ((soft_reset = ql_adapter_reset(qdev))) {
2971 printk(KERN_ERR PFX
2972 "%s: ql_adapter_reset(%d) FAILED!\n",
2973 ndev->name, qdev->index);
2974 }
2975			printk(KERN_ERR PFX
2976			       "%s: Releasing driver lock via chip reset.\n", ndev->name);
2977 } else {
2978 printk(KERN_ERR PFX
2979 "%s: Could not acquire driver lock to do "
2980 "reset!\n", ndev->name);
2981 retval = -1;
2982 }
2983 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2984 }
2985 ql_free_mem_resources(qdev);
2986 return retval;
2987}
2988
2989static int ql_adapter_up(struct ql3_adapter *qdev)
2990{
2991 struct net_device *ndev = qdev->ndev;
2992 int err;
2993 unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
2994 unsigned long hw_flags;
2995
2996 if (ql_alloc_mem_resources(qdev)) {
2997 printk(KERN_ERR PFX
2998 "%s Unable to allocate buffers.\n", ndev->name);
2999 return -ENOMEM;
3000 }
3001
3002 if (qdev->msi) {
3003 if (pci_enable_msi(qdev->pdev)) {
3004 printk(KERN_ERR PFX
3005 "%s: User requested MSI, but MSI failed to "
3006 "initialize. Continuing without MSI.\n",
3007 qdev->ndev->name);
3008 qdev->msi = 0;
3009 } else {
3010 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3011 set_bit(QL_MSI_ENABLED,&qdev->flags);
3012 irq_flags &= ~SA_SHIRQ;
3013 }
3014 }
3015
3016 if ((err = request_irq(qdev->pdev->irq,
3017 ql3xxx_isr,
3018 irq_flags, ndev->name, ndev))) {
3019		printk(KERN_ERR PFX
3020		       "%s: Failed to reserve interrupt %d; already in use.\n",
3021		       ndev->name, qdev->pdev->irq);
3022 goto err_irq;
3023 }
3024
3025 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3026
3027 if ((err = ql_wait_for_drvr_lock(qdev))) {
3028 if ((err = ql_adapter_initialize(qdev))) {
3029 printk(KERN_ERR PFX
3030 "%s: Unable to initialize adapter.\n",
3031 ndev->name);
3032 goto err_init;
3033 }
3034		printk(KERN_ERR PFX
3035		       "%s: Releasing driver lock.\n", ndev->name);
3036 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3037	} else {
3038		printk(KERN_ERR PFX
3039		       "%s: Could not acquire driver lock.\n",
3040		       ndev->name);
3041		err = -EBUSY;
3042		goto err_lock;
3043	}
3043
3044 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3045
3046 set_bit(QL_ADAPTER_UP,&qdev->flags);
3047
3048 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3049
3050 netif_poll_enable(ndev);
3051 ql_enable_interrupts(qdev);
3052 return 0;
3053
3054err_init:
3055 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3056err_lock:
3057 free_irq(qdev->pdev->irq, ndev);
3058err_irq:
3059 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3060 printk(KERN_INFO PFX
3061 "%s: calling pci_disable_msi().\n",
3062 qdev->ndev->name);
3063 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3064 pci_disable_msi(qdev->pdev);
3065 }
3066 return err;
3067}
3068
3069static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3070{
3071	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3072		printk(KERN_ERR PFX
3073		       "%s: Driver up/down cycle failed, closing device.\n",
3074		       qdev->ndev->name);
3075 dev_close(qdev->ndev);
3076 return -1;
3077 }
3078 return 0;
3079}
3080
3081static int ql3xxx_close(struct net_device *ndev)
3082{
3083 struct ql3_adapter *qdev = netdev_priv(ndev);
3084
3085 /*
3086 * Wait for device to recover from a reset.
3087 * (Rarely happens, but possible.)
3088 */
3089 while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
3090 msleep(50);
3091
3092 ql_adapter_down(qdev,QL_DO_RESET);
3093 return 0;
3094}
3095
3096static int ql3xxx_open(struct net_device *ndev)
3097{
3098 struct ql3_adapter *qdev = netdev_priv(ndev);
3099 return (ql_adapter_up(qdev));
3100}
3101
3102static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3103{
3104	struct ql3_adapter *qdev = netdev_priv(dev);
3105 return &qdev->stats;
3106}
3107
3108static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
3109{
3110 struct ql3_adapter *qdev = netdev_priv(ndev);
3111 printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
3112 if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
3113 printk(KERN_ERR PFX
3114 "%s: mtu size of %d is not valid. Use exactly %d or "
3115 "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
3116 JUMBO_MTU_SIZE);
3117 return -EINVAL;
3118 }
3119
3120 if (!netif_running(ndev)) {
3121 ndev->mtu = new_mtu;
3122 return 0;
3123 }
3124
3125 ndev->mtu = new_mtu;
3126 return ql_cycle_adapter(qdev,QL_DO_RESET);
3127}
3128
3129static void ql3xxx_set_multicast_list(struct net_device *ndev)
3130{
3131	/*
3132	 * Multicast is not supported; IFF_MULTICAST is cleared at probe time.
3133	 */
3134 return;
3135}
3136
3137static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3138{
3139 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3140 struct ql3xxx_port_registers __iomem *port_regs =
3141 qdev->mem_map_registers;
3142 struct sockaddr *addr = p;
3143 unsigned long hw_flags;
3144
3145 if (netif_running(ndev))
3146 return -EBUSY;
3147
3148 if (!is_valid_ether_addr(addr->sa_data))
3149 return -EADDRNOTAVAIL;
3150
3151 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3152
3153 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3154 /* Program lower 32 bits of the MAC address */
3155 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3156 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3157 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3158 ((ndev->dev_addr[2] << 24) | (ndev->
3159 dev_addr[3] << 16) |
3160 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3161
3162 /* Program top 16 bits of the MAC address */
3163 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3164 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3165 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3166 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3167 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3168
3169 return 0;
3170}
3171
3172static void ql3xxx_tx_timeout(struct net_device *ndev)
3173{
3174 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3175
3176 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3177 /*
3178 * Stop the queues, we've got a problem.
3179 */
3180 netif_stop_queue(ndev);
3181
3182 /*
3183 * Wake up the worker to process this event.
3184 */
3185 queue_work(qdev->workqueue, &qdev->tx_timeout_work);
3186}
3187
3188static void ql_reset_work(struct ql3_adapter *qdev)
3189{
3190 struct net_device *ndev = qdev->ndev;
3191 u32 value;
3192 struct ql_tx_buf_cb *tx_cb;
3193 int max_wait_time, i;
3194 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3195 unsigned long hw_flags;
3196
3197	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
3198 clear_bit(QL_LINK_MASTER,&qdev->flags);
3199
3200 /*
3201 * Loop through the active list and return the skb.
3202 */
3203 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3204 tx_cb = &qdev->tx_buf[i];
3205 if (tx_cb->skb) {
3206
3207 printk(KERN_DEBUG PFX
3208 "%s: Freeing lost SKB.\n",
3209 qdev->ndev->name);
3210 pci_unmap_single(qdev->pdev,
3211 pci_unmap_addr(tx_cb, mapaddr),
3212 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
3213 dev_kfree_skb(tx_cb->skb);
3214 tx_cb->skb = NULL;
3215 }
3216 }
3217
3218 printk(KERN_ERR PFX
3219 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3220 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3221 ql_write_common_reg(qdev,
3222 &port_regs->CommonRegs.
3223 ispControlStatus,
3224 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3225		/*
3226		 * Wait for the soft reset to complete.
3227		 */
3228 max_wait_time = 10;
3229		do {
3230			value = ql_read_common_reg(qdev,
3231						   &port_regs->CommonRegs.
3232						   ispControlStatus);
3234 if ((value & ISP_CONTROL_SR) == 0) {
3235 printk(KERN_DEBUG PFX
3236 "%s: reset completed.\n",
3237 qdev->ndev->name);
3238 break;
3239 }
3240
3241 if (value & ISP_CONTROL_RI) {
3242 printk(KERN_DEBUG PFX
3243 "%s: clearing NRI after reset.\n",
3244 qdev->ndev->name);
3245 ql_write_common_reg(qdev,
3246 (u32 *) &
3247 port_regs->
3248 CommonRegs.
3249 ispControlStatus,
3250 ((ISP_CONTROL_RI <<
3251 16) | ISP_CONTROL_RI));
3252 }
3253
3254 ssleep(1);
3255 } while (--max_wait_time);
3256 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3257
3258 if (value & ISP_CONTROL_SR) {
3259
3260 /*
3261 * Set the reset flags and clear the board again.
3262 * Nothing else to do...
3263 */
3264 printk(KERN_ERR PFX
3265 "%s: Timed out waiting for reset to "
3266 "complete.\n", ndev->name);
3267			printk(KERN_ERR PFX
3268			       "%s: Issuing a full adapter reset.\n", ndev->name);
3269 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3270 clear_bit(QL_RESET_START,&qdev->flags);
3271 ql_cycle_adapter(qdev,QL_DO_RESET);
3272 return;
3273 }
3274
3275 clear_bit(QL_RESET_ACTIVE,&qdev->flags);
3276 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3277 clear_bit(QL_RESET_START,&qdev->flags);
3278 ql_cycle_adapter(qdev,QL_NO_RESET);
3279 }
3280}
3281
3282static void ql_tx_timeout_work(struct ql3_adapter *qdev)
3283{
3284 ql_cycle_adapter(qdev,QL_DO_RESET);
3285}
3286
3287static void ql_get_board_info(struct ql3_adapter *qdev)
3288{
3289 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3290 u32 value;
3291
3292 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3293
3294 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3295 if (value & PORT_STATUS_64)
3296 qdev->pci_width = 64;
3297 else
3298 qdev->pci_width = 32;
3299 if (value & PORT_STATUS_X)
3300 qdev->pci_x = 1;
3301 else
3302 qdev->pci_x = 0;
3303 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3304}
3305
3306static void ql3xxx_timer(unsigned long ptr)
3307{
3308 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3309
3310 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
3311 printk(KERN_DEBUG PFX
3312 "%s: Reset in progress.\n",
3313 qdev->ndev->name);
3314 goto end;
3315 }
3316
3317 ql_link_state_machine(qdev);
3318
3319	/* Restart timer on a 1 second interval. */
3320end:
3321 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3322}
3323
3324static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3325 const struct pci_device_id *pci_entry)
3326{
3327 struct net_device *ndev = NULL;
3328 struct ql3_adapter *qdev = NULL;
3329 static int cards_found = 0;
3330 int pci_using_dac, err;
3331
3332 err = pci_enable_device(pdev);
3333 if (err) {
3334 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3335 pci_name(pdev));
3336 goto err_out;
3337 }
3338
3339 err = pci_request_regions(pdev, DRV_NAME);
3340 if (err) {
3341 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3342 pci_name(pdev));
3343 goto err_out_disable_pdev;
3344 }
3345
3346 pci_set_master(pdev);
3347
3348 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3349 pci_using_dac = 1;
3350 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3351 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3352 pci_using_dac = 0;
3353 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3354 }
3355
3356 if (err) {
3357 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3358 pci_name(pdev));
3359 goto err_out_free_regions;
3360 }
3361
3362 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3363 if (!ndev)
3364 goto err_out_free_regions;
3365
3366 SET_MODULE_OWNER(ndev);
3367 SET_NETDEV_DEV(ndev, &pdev->dev);
3368
3369 ndev->features = NETIF_F_LLTX;
3370 if (pci_using_dac)
3371 ndev->features |= NETIF_F_HIGHDMA;
3372
3373 pci_set_drvdata(pdev, ndev);
3374
3375 qdev = netdev_priv(ndev);
3376 qdev->index = cards_found;
3377 qdev->ndev = ndev;
3378 qdev->pdev = pdev;
3379 qdev->port_link_state = LS_DOWN;
3380 if (msi)
3381 qdev->msi = 1;
3382
3383 qdev->msg_enable = netif_msg_init(debug, default_msg);
3384
3385 qdev->mem_map_registers =
3386 ioremap_nocache(pci_resource_start(pdev, 1),
3387 pci_resource_len(qdev->pdev, 1));
3388 if (!qdev->mem_map_registers) {
3389 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3390 pci_name(pdev));
3391 goto err_out_free_ndev;
3392 }
3393
3394 spin_lock_init(&qdev->adapter_lock);
3395 spin_lock_init(&qdev->hw_lock);
3396
3397 /* Set driver entry points */
3398 ndev->open = ql3xxx_open;
3399 ndev->hard_start_xmit = ql3xxx_send;
3400 ndev->stop = ql3xxx_close;
3401 ndev->get_stats = ql3xxx_get_stats;
3402 ndev->change_mtu = ql3xxx_change_mtu;
3403 ndev->set_multicast_list = ql3xxx_set_multicast_list;
3404 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3405 ndev->set_mac_address = ql3xxx_set_mac_address;
3406 ndev->tx_timeout = ql3xxx_tx_timeout;
3407 ndev->watchdog_timeo = 5 * HZ;
3408
3409 ndev->poll = &ql_poll;
3410 ndev->weight = 64;
3411
3412 ndev->irq = pdev->irq;
3413
3414 /* make sure the EEPROM is good */
3415 if (ql_get_nvram_params(qdev)) {
3416 printk(KERN_ALERT PFX
3417 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
3418 qdev->index);
3419 goto err_out_iounmap;
3420 }
3421
3422 ql_set_mac_info(qdev);
3423
3424 /* Validate and set parameters */
3425 if (qdev->mac_index) {
3426 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
3427 ETH_ALEN);
3428 } else {
3429 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
3430 ETH_ALEN);
3431 }
3432 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3433
3434 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3435
3436 /* Turn off support for multicasting */
3437 ndev->flags &= ~IFF_MULTICAST;
3438
3439 /* Record PCI bus information. */
3440 ql_get_board_info(qdev);
3441
3442 /*
3443 * Set the Maximum Memory Read Byte Count value. We do this to handle
3444 * jumbo frames.
3445 */
3446 if (qdev->pci_x) {
3447 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3448 }
3449
3450 err = register_netdev(ndev);
3451 if (err) {
3452 printk(KERN_ERR PFX "%s: cannot register net device\n",
3453 pci_name(pdev));
3454 goto err_out_iounmap;
3455 }
3456
3457 /* we're going to reset, so assume we have no link for now */
3458
3459 netif_carrier_off(ndev);
3460 netif_stop_queue(ndev);
3461
3462 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3463 INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
3464 INIT_WORK(&qdev->tx_timeout_work,
3465 (void (*)(void *))ql_tx_timeout_work, qdev);
3466
3467 init_timer(&qdev->adapter_timer);
3468 qdev->adapter_timer.function = ql3xxx_timer;
3469 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3470 qdev->adapter_timer.data = (unsigned long)qdev;
3471
3472 if(!cards_found) {
3473 printk(KERN_ALERT PFX "%s\n", DRV_STRING);
3474 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
3475 DRV_NAME, DRV_VERSION);
3476 }
3477 ql_display_dev_info(ndev);
3478
3479 cards_found++;
3480 return 0;
3481
3482err_out_iounmap:
3483 iounmap(qdev->mem_map_registers);
3484err_out_free_ndev:
3485 free_netdev(ndev);
3486err_out_free_regions:
3487 pci_release_regions(pdev);
3488err_out_disable_pdev:
3489 pci_disable_device(pdev);
3490 pci_set_drvdata(pdev, NULL);
3491err_out:
3492 return err;
3493}
3494
3495static void __devexit ql3xxx_remove(struct pci_dev *pdev)
3496{
3497 struct net_device *ndev = pci_get_drvdata(pdev);
3498 struct ql3_adapter *qdev = netdev_priv(ndev);
3499
3500	unregister_netdev(ndev);
3502
3503 ql_disable_interrupts(qdev);
3504
3505 if (qdev->workqueue) {
3506 cancel_delayed_work(&qdev->reset_work);
3507 cancel_delayed_work(&qdev->tx_timeout_work);
3508 destroy_workqueue(qdev->workqueue);
3509 qdev->workqueue = NULL;
3510 }
3511
3512	iounmap(qdev->mem_map_registers);
3513 pci_release_regions(pdev);
3514 pci_set_drvdata(pdev, NULL);
3515 free_netdev(ndev);
3516}
3517
3518static struct pci_driver ql3xxx_driver = {
3519
3520 .name = DRV_NAME,
3521 .id_table = ql3xxx_pci_tbl,
3522 .probe = ql3xxx_probe,
3523 .remove = __devexit_p(ql3xxx_remove),
3524};
3525
3526static int __init ql3xxx_init_module(void)
3527{
3528 return pci_register_driver(&ql3xxx_driver);
3529}
3530
3531static void __exit ql3xxx_exit(void)
3532{
3533 pci_unregister_driver(&ql3xxx_driver);
3534}
3535
3536module_init(ql3xxx_init_module);
3537module_exit(ql3xxx_exit);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
new file mode 100644
index 000000000000..9492cee6b083
--- /dev/null
+++ b/drivers/net/qla3xxx.h
@@ -0,0 +1,1194 @@
1/*
2 * QLogic QLA3xxx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */
7#ifndef _QLA3XXX_H_
8#define _QLA3XXX_H_
9
10/*
11 * IOCB Definitions...
12 */
13#pragma pack(1)
14
15#define OPCODE_OB_MAC_IOCB_FN0 0x01
16#define OPCODE_OB_MAC_IOCB_FN2 0x21
17#define OPCODE_OB_TCP_IOCB_FN0 0x03
18#define OPCODE_OB_TCP_IOCB_FN2 0x23
19#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00
20#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20
21
22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_IP_IOCB 0xFA
25#define OPCODE_IB_TCP_IOCB 0xFB
26#define OPCODE_DUMP_PROTO_IOCB 0xFE
27#define OPCODE_BUFFER_ALERT_IOCB 0xFB
28
29#define OPCODE_FUNC_ID_MASK 0x30
30#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
31#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */
32#define UPDATE_NCB_IOCB 0x00 /* plus function bits */
33
34#define FN0_MA_BITS_MASK 0x00
35#define FN1_MA_BITS_MASK 0x80
36
37struct ob_mac_iocb_req {
38 u8 opcode;
39 u8 flags;
40#define OB_MAC_IOCB_REQ_MA 0xC0
41#define OB_MAC_IOCB_REQ_F 0x20
42#define OB_MAC_IOCB_REQ_X 0x10
43#define OB_MAC_IOCB_REQ_D 0x02
44#define OB_MAC_IOCB_REQ_I 0x01
45 __le16 reserved0;
46
47 __le32 transaction_id;
48 __le16 data_len;
49 __le16 reserved1;
50 __le32 reserved2;
51 __le32 reserved3;
52 __le32 buf_addr0_low;
53 __le32 buf_addr0_high;
54 __le32 buf_0_len;
55 __le32 buf_addr1_low;
56 __le32 buf_addr1_high;
57 __le32 buf_1_len;
58 __le32 buf_addr2_low;
59 __le32 buf_addr2_high;
60 __le32 buf_2_len;
61 __le32 reserved4;
62 __le32 reserved5;
63};
64/*
65 * The following constants define control bits for buffer
66 * length fields for all IOCB's.
67 */
68#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */
69#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */
70#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */
71#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */
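/*
 * Example: a single-fragment transmit sets the length field to
 * (skb->len | OB_MAC_IOCB_REQ_E), marking buffer 0 as the last (and
 * only) entry in the buffer list (see ql3xxx_send()).
 */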
72
73struct ob_mac_iocb_rsp {
74 u8 opcode;
75 u8 flags;
76#define OB_MAC_IOCB_RSP_P 0x08
77#define OB_MAC_IOCB_RSP_S 0x02
78#define OB_MAC_IOCB_RSP_I 0x01
79
80 __le16 reserved0;
81 __le32 transaction_id;
82 __le32 reserved1;
83 __le32 reserved2;
84};
85
86struct ib_mac_iocb_rsp {
87 u8 opcode;
88 u8 flags;
89#define IB_MAC_IOCB_RSP_S 0x80
90#define IB_MAC_IOCB_RSP_H1 0x40
91#define IB_MAC_IOCB_RSP_H0 0x20
92#define IB_MAC_IOCB_RSP_B 0x10
93#define IB_MAC_IOCB_RSP_M 0x08
94#define IB_MAC_IOCB_RSP_MA 0x07
95
96 __le16 length;
97 __le32 reserved;
98 __le32 ial_low;
99 __le32 ial_high;
100
101};
102
103struct ob_ip_iocb_req {
104 u8 opcode;
105 __le16 flags;
106#define OB_IP_IOCB_REQ_O 0x100
107#define OB_IP_IOCB_REQ_H 0x008
108#define OB_IP_IOCB_REQ_U 0x004
109#define OB_IP_IOCB_REQ_D 0x002
110#define OB_IP_IOCB_REQ_I 0x001
111
112 u8 reserved0;
113
114 __le32 transaction_id;
115 __le16 data_len;
116 __le16 reserved1;
117 __le32 hncb_ptr_low;
118 __le32 hncb_ptr_high;
119 __le32 buf_addr0_low;
120 __le32 buf_addr0_high;
121 __le32 buf_0_len;
122 __le32 buf_addr1_low;
123 __le32 buf_addr1_high;
124 __le32 buf_1_len;
125 __le32 buf_addr2_low;
126 __le32 buf_addr2_high;
127 __le32 buf_2_len;
128 __le32 reserved2;
129 __le32 reserved3;
130};
131
132/* defines for BufferLength fields above */
133#define OB_IP_IOCB_REQ_E 0x80000000
134#define OB_IP_IOCB_REQ_C 0x40000000
135#define OB_IP_IOCB_REQ_L 0x20000000
136#define OB_IP_IOCB_REQ_R 0x10000000
137
138struct ob_ip_iocb_rsp {
139 u8 opcode;
140 u8 flags;
141#define OB_MAC_IOCB_RSP_E 0x08
142#define OB_MAC_IOCB_RSP_L 0x04
143#define OB_MAC_IOCB_RSP_S 0x02
144#define OB_MAC_IOCB_RSP_I 0x01
145
146 __le16 reserved0;
147 __le32 transaction_id;
148 __le32 reserved1;
149 __le32 reserved2;
150};
151
152struct ob_tcp_iocb_req {
153 u8 opcode;
154
155 u8 flags0;
156#define OB_TCP_IOCB_REQ_P 0x80
157#define OB_TCP_IOCB_REQ_CI 0x20
158#define OB_TCP_IOCB_REQ_H 0x10
159#define OB_TCP_IOCB_REQ_LN 0x08
160#define OB_TCP_IOCB_REQ_K 0x04
161#define OB_TCP_IOCB_REQ_D 0x02
162#define OB_TCP_IOCB_REQ_I 0x01
163
164 u8 flags1;
165#define OB_TCP_IOCB_REQ_OSM 0x40
166#define OB_TCP_IOCB_REQ_URG 0x20
167#define OB_TCP_IOCB_REQ_ACK 0x10
168#define OB_TCP_IOCB_REQ_PSH 0x08
169#define OB_TCP_IOCB_REQ_RST 0x04
170#define OB_TCP_IOCB_REQ_SYN 0x02
171#define OB_TCP_IOCB_REQ_FIN 0x01
172
173 u8 options_len;
174#define OB_TCP_IOCB_REQ_OMASK 0xF0
175#define OB_TCP_IOCB_REQ_SHIFT 4
176
177 __le32 transaction_id;
178 __le32 data_len;
179 __le32 hncb_ptr_low;
180 __le32 hncb_ptr_high;
181 __le32 buf_addr0_low;
182 __le32 buf_addr0_high;
183 __le32 buf_0_len;
184 __le32 buf_addr1_low;
185 __le32 buf_addr1_high;
186 __le32 buf_1_len;
187 __le32 buf_addr2_low;
188 __le32 buf_addr2_high;
189 __le32 buf_2_len;
190 __le32 time_stamp;
191 __le32 reserved1;
192};
193
194struct ob_tcp_iocb_rsp {
195 u8 opcode;
196
197 u8 flags0;
198#define OB_TCP_IOCB_RSP_C 0x20
199#define OB_TCP_IOCB_RSP_H 0x10
200#define OB_TCP_IOCB_RSP_LN 0x08
201#define OB_TCP_IOCB_RSP_K 0x04
202#define OB_TCP_IOCB_RSP_D 0x02
203#define OB_TCP_IOCB_RSP_I 0x01
204
205 u8 flags1;
206#define OB_TCP_IOCB_RSP_E 0x10
207#define OB_TCP_IOCB_RSP_W 0x08
208#define OB_TCP_IOCB_RSP_P 0x04
209#define OB_TCP_IOCB_RSP_T 0x02
210#define OB_TCP_IOCB_RSP_F 0x01
211
212 u8 state;
213#define OB_TCP_IOCB_RSP_SMASK 0xF0
214#define OB_TCP_IOCB_RSP_SHIFT 4
215
216 __le32 transaction_id;
217 __le32 local_ncb_ptr;
218 __le32 reserved0;
219};
220
221struct ib_ip_iocb_rsp {
222 u8 opcode;
223 u8 flags;
224#define IB_IP_IOCB_RSP_S 0x80
225#define IB_IP_IOCB_RSP_H1 0x40
226#define IB_IP_IOCB_RSP_H0 0x20
227#define IB_IP_IOCB_RSP_B 0x10
228#define IB_IP_IOCB_RSP_M 0x08
229#define IB_IP_IOCB_RSP_MA 0x07
230
231 __le16 length;
232 __le16 checksum;
233 __le16 reserved;
234#define IB_IP_IOCB_RSP_R 0x01
235 __le32 ial_low;
236 __le32 ial_high;
237};
238
239struct ib_tcp_iocb_rsp {
240 u8 opcode;
241 u8 flags;
242#define IB_TCP_IOCB_RSP_P 0x80
243#define IB_TCP_IOCB_RSP_T 0x40
244#define IB_TCP_IOCB_RSP_D 0x20
245#define IB_TCP_IOCB_RSP_N 0x10
246#define IB_TCP_IOCB_RSP_IP 0x03
247#define IB_TCP_FLAG_MASK 0xf0
248#define IB_TCP_FLAG_IOCB_SYN 0x00
249
250#define TCP_IB_RSP_FLAGS(x) ((x)->flags & ~IB_TCP_FLAG_MASK)
251
252 __le16 length;
253 __le32 hncb_ref_num;
254 __le32 ial_low;
255 __le32 ial_high;
256};
257
258struct net_rsp_iocb {
259 u8 opcode;
260 u8 flags;
261 __le16 reserved0;
262 __le32 reserved[3];
263};
264#pragma pack()
265
266/*
267 * Register Definitions...
268 */
269#define PORT0_PHY_ADDRESS 0x1e00
270#define PORT1_PHY_ADDRESS 0x1f00
271
272#define ETHERNET_CRC_SIZE 4
273
274#define MII_SCAN_REGISTER 0x00000001
275
276/* 32-bit ispControlStatus */
277enum {
278 ISP_CONTROL_NP_MASK = 0x0003,
279 ISP_CONTROL_NP_PCSR = 0x0000,
280 ISP_CONTROL_NP_HMCR = 0x0001,
281 ISP_CONTROL_NP_LRAMCR = 0x0002,
282 ISP_CONTROL_NP_PSR = 0x0003,
283 ISP_CONTROL_RI = 0x0008,
284 ISP_CONTROL_CI = 0x0010,
285 ISP_CONTROL_PI = 0x0020,
286 ISP_CONTROL_IN = 0x0040,
287 ISP_CONTROL_BE = 0x0080,
288 ISP_CONTROL_FN_MASK = 0x0700,
289 ISP_CONTROL_FN0_NET = 0x0400,
290 ISP_CONTROL_FN0_SCSI = 0x0500,
291 ISP_CONTROL_FN1_NET = 0x0600,
292 ISP_CONTROL_FN1_SCSI = 0x0700,
293 ISP_CONTROL_LINK_DN_0 = 0x0800,
294 ISP_CONTROL_LINK_DN_1 = 0x1000,
295 ISP_CONTROL_FSR = 0x2000,
296 ISP_CONTROL_FE = 0x4000,
297 ISP_CONTROL_SR = 0x8000,
298};
299
300/* 32-bit ispInterruptMaskReg */
301enum {
302 ISP_IMR_ENABLE_INT = 0x0004,
303 ISP_IMR_DISABLE_RESET_INT = 0x0008,
304 ISP_IMR_DISABLE_CMPL_INT = 0x0010,
305 ISP_IMR_DISABLE_PROC_INT = 0x0020,
306};
307
308/* 32-bit serialPortInterfaceReg */
309enum {
310 ISP_SERIAL_PORT_IF_CLK = 0x0001,
311 ISP_SERIAL_PORT_IF_CS = 0x0002,
312 ISP_SERIAL_PORT_IF_D0 = 0x0004,
313 ISP_SERIAL_PORT_IF_DI = 0x0008,
314 ISP_NVRAM_MASK = (0x000F << 16),
315 ISP_SERIAL_PORT_IF_WE = 0x0010,
316 ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
317 ISP_SERIAL_PORT_IF_SCI = 0x0400,
318 ISP_SERIAL_PORT_IF_SC0 = 0x0800,
319 ISP_SERIAL_PORT_IF_SCE = 0x1000,
320 ISP_SERIAL_PORT_IF_SDI = 0x2000,
321 ISP_SERIAL_PORT_IF_SDO = 0x4000,
322 ISP_SERIAL_PORT_IF_SDE = 0x8000,
323 ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
324};
325
326/* semaphoreReg */
327enum {
328 QL_RESOURCE_MASK_BASE_CODE = 0x7,
329 QL_RESOURCE_BITS_BASE_CODE = 0x4,
330 QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
331 QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
332 QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
333 QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
334 QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
335 QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
336 QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
337 QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
338 QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
339 QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
340};
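/*
 * Editorial sketch (not part of the driver source): the paired *_SEM_BITS /
 * *_SEM_MASK values above follow a write-with-enable-mask convention, where
 * the upper 16 bits of the written word select which semaphore field may
 * change.  Acquiring a semaphore and verifying ownership could look like
 * this; the helper name and exact register behavior are assumptions made
 * for illustration only.
 */
static int ql_sem_try_lock(void __iomem *sem_reg, u32 sem_mask, u32 sem_bits)
{
	u32 value;

	/* write the ownership bits under their enable mask... */
	writel(sem_mask | sem_bits, sem_reg);
	/* ...then read back to check that we actually got the semaphore */
	value = readl(sem_reg);
	return (value & (sem_mask >> 16)) == sem_bits;
}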
341
342 /*
343 * QL3XXX memory-mapped registers
344 * QL3XXX has 4 "pages" of registers, each page occupying
345 * 256 bytes. Each page has a "common" area at the start and then
346 * page-specific registers after that.
347 */
348struct ql3xxx_common_registers {
349 u32 MB0; /* Offset 0x00 */
350 u32 MB1; /* Offset 0x04 */
351 u32 MB2; /* Offset 0x08 */
352 u32 MB3; /* Offset 0x0c */
353 u32 MB4; /* Offset 0x10 */
354 u32 MB5; /* Offset 0x14 */
355 u32 MB6; /* Offset 0x18 */
356 u32 MB7; /* Offset 0x1c */
357 u32 flashBiosAddr;
358 u32 flashBiosData;
359 u32 ispControlStatus;
360 u32 ispInterruptMaskReg;
361 u32 serialPortInterfaceReg;
362 u32 semaphoreReg;
363 u32 reqQProducerIndex;
364 u32 rspQConsumerIndex;
365
366 u32 rxLargeQProducerIndex;
367 u32 rxSmallQProducerIndex;
368 u32 arcMadiCommand;
369 u32 arcMadiData;
370};
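/*
 * Editorial sketch (not part of the driver source): the ISP_CONTROL_NP_*
 * values in ispControlStatus select which of the four register pages the
 * page-specific area decodes to, so a driver must switch pages before
 * touching page 1 or page 2 registers.  A minimal page-select helper,
 * assuming the same write-with-enable-mask convention used elsewhere on
 * this chip:
 */
static void ql_select_register_page(struct ql3xxx_common_registers __iomem *regs,
				    u32 page)
{
	/* upper halfword enables the NP field, lower halfword selects page 0-3 */
	writel((ISP_CONTROL_NP_MASK << 16) | page, &regs->ispControlStatus);
	readl(&regs->ispControlStatus);	/* flush the posted write */
}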
371
372enum {
373 EXT_HW_CONFIG_SP_MASK = 0x0006,
374 EXT_HW_CONFIG_SP_NONE = 0x0000,
375 EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
376 EXT_HW_CONFIG_SP_ECC = 0x0004,
377 EXT_HW_CONFIG_SP_ECCx = 0x0006,
378 EXT_HW_CONFIG_SIZE_MASK = 0x0060,
379 EXT_HW_CONFIG_SIZE_128M = 0x0000,
380 EXT_HW_CONFIG_SIZE_256M = 0x0020,
381 EXT_HW_CONFIG_SIZE_512M = 0x0040,
382 EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
383 EXT_HW_CONFIG_PD = 0x0080,
384 EXT_HW_CONFIG_FW = 0x0200,
385 EXT_HW_CONFIG_US = 0x0400,
386 EXT_HW_CONFIG_DCS_MASK = 0x1800,
387 EXT_HW_CONFIG_DCS_9MA = 0x0000,
388 EXT_HW_CONFIG_DCS_15MA = 0x0800,
389 EXT_HW_CONFIG_DCS_18MA = 0x1000,
390 EXT_HW_CONFIG_DCS_24MA = 0x1800,
391 EXT_HW_CONFIG_DDS_MASK = 0x6000,
392 EXT_HW_CONFIG_DDS_9MA = 0x0000,
393 EXT_HW_CONFIG_DDS_15MA = 0x2000,
394 EXT_HW_CONFIG_DDS_18MA = 0x4000,
395 EXT_HW_CONFIG_DDS_24MA = 0x6000,
396};
397
398/* InternalChipConfig */
399enum {
400 INTERNAL_CHIP_DM = 0x0001,
401 INTERNAL_CHIP_SD = 0x0002,
402 INTERNAL_CHIP_RAP_MASK = 0x000C,
403 INTERNAL_CHIP_RAP_RR = 0x0000,
404 INTERNAL_CHIP_RAP_NRM = 0x0004,
405 INTERNAL_CHIP_RAP_ERM = 0x0008,
406 INTERNAL_CHIP_RAP_ERMx = 0x000C,
407 INTERNAL_CHIP_WE = 0x0010,
408 INTERNAL_CHIP_EF = 0x0020,
409 INTERNAL_CHIP_FR = 0x0040,
410 INTERNAL_CHIP_FW = 0x0080,
411 INTERNAL_CHIP_FI = 0x0100,
412 INTERNAL_CHIP_FT = 0x0200,
413};
414
415/* portControl */
416enum {
417 PORT_CONTROL_DS = 0x0001,
418 PORT_CONTROL_HH = 0x0002,
419 PORT_CONTROL_EI = 0x0004,
420 PORT_CONTROL_ET = 0x0008,
421 PORT_CONTROL_EF = 0x0010,
422 PORT_CONTROL_DRM = 0x0020,
423 PORT_CONTROL_RLB = 0x0040,
424 PORT_CONTROL_RCB = 0x0080,
425 PORT_CONTROL_MAC = 0x0100,
426 PORT_CONTROL_IPV = 0x0200,
427 PORT_CONTROL_IFP = 0x0400,
428 PORT_CONTROL_ITP = 0x0800,
429 PORT_CONTROL_FI = 0x1000,
430 PORT_CONTROL_DFP = 0x2000,
431 PORT_CONTROL_OI = 0x4000,
432 PORT_CONTROL_CC = 0x8000,
433};
434
435/* portStatus */
436enum {
437 PORT_STATUS_SM0 = 0x0001,
438 PORT_STATUS_SM1 = 0x0002,
439 PORT_STATUS_X = 0x0008,
440 PORT_STATUS_DL = 0x0080,
441 PORT_STATUS_IC = 0x0200,
442 PORT_STATUS_MRC = 0x0400,
443 PORT_STATUS_NL = 0x0800,
444 PORT_STATUS_REV_ID_MASK = 0x7000,
445 PORT_STATUS_REV_ID_1 = 0x1000,
446 PORT_STATUS_REV_ID_2 = 0x2000,
447 PORT_STATUS_REV_ID_3 = 0x3000,
448 PORT_STATUS_64 = 0x8000,
449 PORT_STATUS_UP0 = 0x10000,
450 PORT_STATUS_AC0 = 0x20000,
451 PORT_STATUS_AE0 = 0x40000,
452 PORT_STATUS_UP1 = 0x100000,
453 PORT_STATUS_AC1 = 0x200000,
454 PORT_STATUS_AE1 = 0x400000,
455 PORT_STATUS_F0_ENABLED = 0x1000000,
456 PORT_STATUS_F1_ENABLED = 0x2000000,
457 PORT_STATUS_F2_ENABLED = 0x4000000,
458 PORT_STATUS_F3_ENABLED = 0x8000000,
459};
460
461/* macAddrIndirectPtrReg */
462enum {
463 MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
464 MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
465 MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
466 MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
467 MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
468 MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
469 MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
470 MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
471 MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
472 MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
473};
474
475/* macMIIMgmtControlReg */
476enum {
477 MAC_MII_CONTROL_RC = 0x0001,
478 MAC_MII_CONTROL_SC = 0x0002,
479 MAC_MII_CONTROL_AS = 0x0004,
480 MAC_MII_CONTROL_NP = 0x0008,
481 MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
482 MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
483 MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
484 MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
485 MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
486 MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
487 MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
488 MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
489 MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
490 MAC_MII_CONTROL_RM = 0x8000,
491};
492
493/* macMIIStatusReg */
494enum {
495 MAC_MII_STATUS_BSY = 0x0001,
496 MAC_MII_STATUS_SC = 0x0002,
497 MAC_MII_STATUS_NV = 0x0004,
498};
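/*
 * Editorial sketch (not part of the driver source): a read through the MII
 * management block typically programs the PHY/register address, starts a
 * read cycle, then polls macMIIStatusReg until MAC_MII_STATUS_BSY clears.
 * The address encoding (PHY address pre-shifted, e.g. PORT0_PHY_ADDRESS,
 * OR'ed with the register number) is an assumption for illustration;
 * timeout handling is omitted.
 */
static int ql_mii_read(void __iomem *addr_reg, void __iomem *ctrl_reg,
		       void __iomem *status_reg, void __iomem *data_reg,
		       u32 phy_addr, u32 mii_reg, u16 *value)
{
	writel(phy_addr | mii_reg, addr_reg);	/* e.g. PORT0_PHY_ADDRESS | reg */
	writel(MAC_MII_CONTROL_RC, ctrl_reg);	/* start a read cycle */
	while (readl(status_reg) & MAC_MII_STATUS_BSY)
		cpu_relax();			/* wait for the cycle to finish */
	*value = readl(data_reg) & 0xffff;
	return 0;
}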
499
500enum {
501 MAC_CONFIG_REG_PE = 0x0001,
502 MAC_CONFIG_REG_TF = 0x0002,
503 MAC_CONFIG_REG_RF = 0x0004,
504 MAC_CONFIG_REG_FD = 0x0008,
505 MAC_CONFIG_REG_GM = 0x0010,
506 MAC_CONFIG_REG_LB = 0x0020,
507 MAC_CONFIG_REG_SR = 0x8000,
508};
509
510enum {
511 MAC_HALF_DUPLEX_REG_ED = 0x10000,
512 MAC_HALF_DUPLEX_REG_NB = 0x20000,
513 MAC_HALF_DUPLEX_REG_BNB = 0x40000,
514 MAC_HALF_DUPLEX_REG_ALT = 0x80000,
515};
516
517enum {
518 IP_ADDR_INDEX_REG_MASK = 0x000f,
519 IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
520 IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
521 IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
522 IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
523 IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
524 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
525 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
526 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
527};
528
529enum {
530 PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
531 PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
532 PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
533 PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
534 PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
535 PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
536 PROBE_MUX_ADDR_REG_UP = 0x4000,
537 PROBE_MUX_ADDR_REG_RE = 0x8000,
538};
539
540enum {
541 STATISTICS_INDEX_REG_MASK = 0x01ff,
542 STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
543 STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
544 STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
545 STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
546 STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
547 STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
548 STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
549 STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
550 STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
551 STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
552 STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
553 STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
554 STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
555 STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
556 STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
557 STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
558 STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
559 STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
560 STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
561 STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
562 STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
563 STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
564 STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
565 STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
566 STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
567 STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
568 STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
569 STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
570 STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
571 STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
572 STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
573 STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
574 STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
575 STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
576 STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
577 STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
578 STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
579 STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
580 STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
581 STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
582 STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
583 STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
584 STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
585 STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
586 STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
587 STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
588 STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
589 STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
590 STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
591 STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
592 STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
593 STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
594};
595
596enum {
597 PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
598 PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
599 PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
600 PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
601 PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
602 PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
603 PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
604 PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
605 PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
606 PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
607 PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
608 PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
609 PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
610 PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
611 PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
612 PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
613 PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
614 PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
615 PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
616 PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
617 PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
618 PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
619 PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
620 PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
621 PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
622};
623
624/*
625 * port control and status page - page 0
626 */
627
628struct ql3xxx_port_registers {
629 struct ql3xxx_common_registers CommonRegs;
630
631 u32 ExternalHWConfig;
632 u32 InternalChipConfig;
633 u32 portControl;
634 u32 portStatus;
635 u32 macAddrIndirectPtrReg;
636 u32 macAddrDataReg;
637 u32 macMIIMgmtControlReg;
638 u32 macMIIMgmtAddrReg;
639 u32 macMIIMgmtDataReg;
640 u32 macMIIStatusReg;
641 u32 mac0ConfigReg;
642 u32 mac0IpgIfgReg;
643 u32 mac0HalfDuplexReg;
644 u32 mac0MaxFrameLengthReg;
645 u32 mac0PauseThresholdReg;
646 u32 mac1ConfigReg;
647 u32 mac1IpgIfgReg;
648 u32 mac1HalfDuplexReg;
649 u32 mac1MaxFrameLengthReg;
650 u32 mac1PauseThresholdReg;
651 u32 ipAddrIndexReg;
652 u32 ipAddrDataReg;
653 u32 ipReassemblyTimeout;
654 u32 tcpMaxWindow;
655 u32 currentTcpTimestamp[2];
656 u32 internalRamRWAddrReg;
657 u32 internalRamWDataReg;
658 u32 reclaimedBufferAddrRegLow;
659 u32 reclaimedBufferAddrRegHigh;
660 u32 reserved[2];
661 u32 fpgaRevID;
662 u32 localRamAddr;
663 u32 localRamDataAutoIncr;
664 u32 localRamDataNonIncr;
665 u32 gpOutput;
666 u32 gpInput;
667 u32 probeMuxAddr;
668 u32 probeMuxData;
669 u32 statisticsIndexReg;
670 u32 statisticsReadDataRegAutoIncr;
671 u32 statisticsReadDataRegNoIncr;
672 u32 PortFatalErrStatus;
673};
674
675/*
676 * port host memory config page - page 1
677 */
678struct ql3xxx_host_memory_registers {
679 struct ql3xxx_common_registers CommonRegs;
680
681 u32 reserved[12];
682
683 /* Network Request Queue */
684 u32 reqConsumerIndex;
685 u32 reqConsumerIndexAddrLow;
686 u32 reqConsumerIndexAddrHigh;
687 u32 reqBaseAddrLow;
688 u32 reqBaseAddrHigh;
689 u32 reqLength;
690
691 /* Network Completion Queue */
692 u32 rspProducerIndex;
693 u32 rspProducerIndexAddrLow;
694 u32 rspProducerIndexAddrHigh;
695 u32 rspBaseAddrLow;
696 u32 rspBaseAddrHigh;
697 u32 rspLength;
698
699 /* RX Large Buffer Queue */
700 u32 rxLargeQConsumerIndex;
701 u32 rxLargeQBaseAddrLow;
702 u32 rxLargeQBaseAddrHigh;
703 u32 rxLargeQLength;
704 u32 rxLargeBufferLength;
705
706 /* RX Small Buffer Queue */
707 u32 rxSmallQConsumerIndex;
708 u32 rxSmallQBaseAddrLow;
709 u32 rxSmallQBaseAddrHigh;
710 u32 rxSmallQLength;
711 u32 rxSmallBufferLength;
712
713};
714
715/*
716 * port local RAM page - page 2
717 */
718struct ql3xxx_local_ram_registers {
719 struct ql3xxx_common_registers CommonRegs;
720 u32 bufletSize;
721 u32 maxBufletCount;
722 u32 currentBufletCount;
723 u32 reserved;
724 u32 freeBufletThresholdLow;
725 u32 freeBufletThresholdHigh;
726 u32 ipHashTableBase;
727 u32 ipHashTableCount;
728 u32 tcpHashTableBase;
729 u32 tcpHashTableCount;
730 u32 ncbBase;
731 u32 maxNcbCount;
732 u32 currentNcbCount;
733 u32 drbBase;
734 u32 maxDrbCount;
735 u32 currentDrbCount;
736};
737
738/*
739 * macros for splitting a 64-bit value (e.g. a DMA address) into its low and high 32-bit halves
740 */
741
742#define LS_64BITS(x) (u32)(0xffffffff & ((u64)(x)))
743#define MS_64BITS(x) (u32)(0xffffffff & (((u64)(x))>>16>>16)) /* split shift stays defined on 32-bit builds */
744
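/*
 * Editorial sketch (not part of the driver source): LS_64BITS()/MS_64BITS()
 * split a 64-bit DMA address across a low/high register pair, e.g. when
 * handing a queue base address to the chip:
 */
static void ql_write_base_addr(void __iomem *low_reg, void __iomem *high_reg,
			       dma_addr_t base)
{
	writel(LS_64BITS(base), low_reg);	/* bits 31:0 */
	writel(MS_64BITS(base), high_reg);	/* bits 63:32 */
}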
745/*
746 * I/O register
747 */
748
749enum {
750 CONTROL_REG = 0,
751 STATUS_REG = 1,
752 PHY_STAT_LINK_UP = 0x0004,
753 PHY_CTRL_LOOPBACK = 0x4000,
754
755 PETBI_CONTROL_REG = 0x00,
756 PETBI_CTRL_SOFT_RESET = 0x8000,
757 PETBI_CTRL_AUTO_NEG = 0x1000,
758 PETBI_CTRL_RESTART_NEG = 0x0200,
759 PETBI_CTRL_FULL_DUPLEX = 0x0100,
760 PETBI_CTRL_SPEED_1000 = 0x0040,
761
762 PETBI_STATUS_REG = 0x01,
763 PETBI_STAT_NEG_DONE = 0x0020,
764 PETBI_STAT_LINK_UP = 0x0004,
765
766 PETBI_NEG_ADVER = 0x04,
767 PETBI_NEG_PAUSE = 0x0080,
768 PETBI_NEG_PAUSE_MASK = 0x0180,
769 PETBI_NEG_DUPLEX = 0x0020,
770 PETBI_NEG_DUPLEX_MASK = 0x0060,
771
772 PETBI_NEG_PARTNER = 0x05,
773 PETBI_NEG_ERROR_MASK = 0x3000,
774
775 PETBI_EXPANSION_REG = 0x06,
776 PETBI_EXP_PAGE_RX = 0x0002,
777
778 PETBI_TBI_CTRL = 0x11,
779 PETBI_TBI_RESET = 0x8000,
780 PETBI_TBI_AUTO_SENSE = 0x0100,
781 PETBI_TBI_SERDES_MODE = 0x0010,
782 PETBI_TBI_SERDES_WRAP = 0x0002,
783
784 AUX_CONTROL_STATUS = 0x1c,
785 PHY_AUX_NEG_DONE = 0x8000,
786 PHY_NEG_PARTNER = 5,
787 PHY_AUX_DUPLEX_STAT = 0x0020,
788 PHY_AUX_SPEED_STAT = 0x0018,
789 PHY_AUX_NO_HW_STRAP = 0x0004,
790 PHY_AUX_RESET_STICK = 0x0002,
791 PHY_NEG_PAUSE = 0x0400,
792 PHY_CTRL_SOFT_RESET = 0x8000,
793 PHY_NEG_ADVER = 4,
794 PHY_NEG_ADV_SPEED = 0x01e0,
795 PHY_CTRL_RESTART_NEG = 0x0200,
796};
797enum {
798/* FM93C56A serial EEPROM definitions */
799 FM93C56A_START = 0x1,
800/* Commands */
801 FM93C56A_READ = 0x2,
802 FM93C56A_WEN = 0x0,
803 FM93C56A_WRITE = 0x1,
804 FM93C56A_WRITE_ALL = 0x0,
805 FM93C56A_WDS = 0x0,
806 FM93C56A_ERASE = 0x3,
807 FM93C56A_ERASE_ALL = 0x0,
808/* Command Extensions */
809 FM93C56A_WEN_EXT = 0x3,
810 FM93C56A_WRITE_ALL_EXT = 0x1,
811 FM93C56A_WDS_EXT = 0x0,
812 FM93C56A_ERASE_ALL_EXT = 0x2,
813/* Special Bits */
814 FM93C56A_READ_DUMMY_BITS = 1,
815 FM93C56A_READY = 0,
816 FM93C56A_BUSY = 1,
817 FM93C56A_CMD_BITS = 2,
818/* FM93C56A/66A/86A EEPROM size definitions */
819 FM93C56A_SIZE_8 = 0x100,
820 FM93C56A_SIZE_16 = 0x80,
821 FM93C66A_SIZE_8 = 0x200,
822 FM93C66A_SIZE_16 = 0x100,
823 FM93C86A_SIZE_16 = 0x400,
824/* Address Bits */
825 FM93C56A_NO_ADDR_BITS_16 = 8,
826 FM93C56A_NO_ADDR_BITS_8 = 9,
827 FM93C86A_NO_ADDR_BITS_16 = 10,
828/* Data Bits */
829 FM93C56A_DATA_BITS_16 = 16,
830 FM93C56A_DATA_BITS_8 = 8,
831};
832enum {
833/* Auburn Bits */
834 AUBURN_EEPROM_DI = 0x8,
835 AUBURN_EEPROM_DI_0 = 0x0,
836 AUBURN_EEPROM_DI_1 = 0x8,
837 AUBURN_EEPROM_DO = 0x4,
838 AUBURN_EEPROM_DO_0 = 0x0,
839 AUBURN_EEPROM_DO_1 = 0x4,
840 AUBURN_EEPROM_CS = 0x2,
841 AUBURN_EEPROM_CS_0 = 0x0,
842 AUBURN_EEPROM_CS_1 = 0x2,
843 AUBURN_EEPROM_CLK_RISE = 0x1,
844 AUBURN_EEPROM_CLK_FALL = 0x0,
845};
846enum {EEPROM_SIZE = FM93C86A_SIZE_16,
847 EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
848 EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
849};
850
851/*
852 * MAC Config data structure
853 */
854struct eeprom_port_cfg {
855 u16 etherMtu_mac;
856 u16 pauseThreshold_mac;
857 u16 resumeThreshold_mac;
858 u16 portConfiguration;
859#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000
860#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000
861#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000
862#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000
863#define PORT_CONFIG_1000MB_SPEED 0x0400
864#define PORT_CONFIG_100MB_SPEED 0x0200
865#define PORT_CONFIG_10MB_SPEED 0x0100
866#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00
867 u16 reserved[12];
868
869};
870
871/*
872 * BIOS data structure
873 */
874struct eeprom_bios_cfg {
875 u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
876
877 u8 bootID0:7, boodID0Valid:1;
878 u8 bootLun0[8];
879
880 u8 bootID1:7, boodID1Valid:1;
881 u8 bootLun1[8];
882
883 u16 MaxLunsTrgt;
884 u8 reserved[10];
885};
886
887/*
888 * Function Specific Data structure
889 */
890struct eeprom_function_cfg {
891 u8 reserved[30];
892 u8 macAddress[6];
893 u8 macAddressSecondary[6];
894
895 u16 subsysVendorId;
896 u16 subsysDeviceId;
897};
898
899/*
900 * EEPROM format
901 */
902struct eeprom_data {
903 u8 asicId[4];
904 u8 version;
905 u8 numPorts;
906 u16 boardId;
907
908#define EEPROM_BOARDID_STR_SIZE 16
909#define EEPROM_SERIAL_NUM_SIZE 16
910
911 u8 boardIdStr[16];
912 u8 serialNumber[16];
913 u16 extHwConfig;
914 struct eeprom_port_cfg macCfg_port0;
915 struct eeprom_port_cfg macCfg_port1;
916 u16 bufletSize;
917 u16 bufletCount;
918 u16 tcpWindowThreshold50;
919 u16 tcpWindowThreshold25;
920 u16 tcpWindowThreshold0;
921 u16 ipHashTableBaseHi;
922 u16 ipHashTableBaseLo;
923 u16 ipHashTableSize;
924 u16 tcpHashTableBaseHi;
925 u16 tcpHashTableBaseLo;
926 u16 tcpHashTableSize;
927 u16 ncbTableBaseHi;
928 u16 ncbTableBaseLo;
929 u16 ncbTableSize;
930 u16 drbTableBaseHi;
931 u16 drbTableBaseLo;
932 u16 drbTableSize;
933 u16 reserved_142[4];
934 u16 ipReassemblyTimeout;
935 u16 tcpMaxWindowSize;
936 u16 ipSecurity;
937#define IPSEC_CONFIG_PRESENT 0x0001
938 u8 reserved_156[294];
939 u16 qDebug[8];
940 struct eeprom_function_cfg funcCfg_fn0;
941 u16 reserved_510;
942 u8 oemSpace[432];
943 struct eeprom_bios_cfg biosCfg_fn1;
944 struct eeprom_function_cfg funcCfg_fn1;
945 u16 reserved_1022;
946 u8 reserved_1024[464];
947 struct eeprom_function_cfg funcCfg_fn2;
948 u16 reserved_1534;
949 u8 reserved_1536[432];
950 struct eeprom_bios_cfg biosCfg_fn3;
951 struct eeprom_function_cfg funcCfg_fn3;
952 u16 checksum;
953};
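/*
 * Editorial sketch (not part of the driver source): the trailing checksum
 * field is conventionally chosen so the 16-bit words of the whole structure
 * sum to zero.  That convention is an assumption here; a validation loop
 * under it would read:
 */
static int ql_eeprom_checksum_ok(const struct eeprom_data *nv)
{
	const u16 *word = (const u16 *)nv;
	u16 sum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(*nv) / sizeof(u16); i++)
		sum += word[i];

	return sum == 0;	/* non-zero sum indicates corrupt contents */
}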
954
955/*
956 * General definitions...
957 */
958
959/*
960 * Below are a number of compiler switches for controlling driver behavior.
961 * Some are not supported under certain conditions and are noted as such.
962 */
963
964#define QL3XXX_VENDOR_ID 0x1077
965#define QL3022_DEVICE_ID 0x3022
966
967/* MTU & Frame Size stuff */
968#define NORMAL_MTU_SIZE ETH_DATA_LEN
969#define JUMBO_MTU_SIZE 9000
970#define VLAN_ID_LEN 2
971
972/* Request Queue Related Definitions */
973#define NUM_REQ_Q_ENTRIES 256 /* 64-byte IOCBs: 256 * 64 = 16384 (4 pages) */
974
975/* Response Queue Related Definitions */
976#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */
977
978/* Transmit and Receive Buffers */
979#define NUM_LBUFQ_ENTRIES 128
980#define NUM_SBUFQ_ENTRIES 64
981#define QL_SMALL_BUFFER_SIZE 32
982#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
983(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
984 /* Each send has at least one control block. This is how many we keep. */
985#define NUM_SMALL_BUFFERS (NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
986#define NUM_LARGE_BUFFERS (NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
987#define QL_HEADER_SPACE 32 /* header space reserved at the top of the skb */
988/*
989 * Large & Small Buffers for Receives
990 */
991struct lrg_buf_q_entry {
992
993 u32 addr0_lower;
994#define IAL_LAST_ENTRY 0x00000001
995#define IAL_CONT_ENTRY 0x00000002
996#define IAL_FLAG_MASK 0x00000003
997 u32 addr0_upper;
998 u32 addr1_lower;
999 u32 addr1_upper;
1000 u32 addr2_lower;
1001 u32 addr2_upper;
1002 u32 addr3_lower;
1003 u32 addr3_upper;
1004 u32 addr4_lower;
1005 u32 addr4_upper;
1006 u32 addr5_lower;
1007 u32 addr5_upper;
1008 u32 addr6_lower;
1009 u32 addr6_upper;
1010 u32 addr7_lower;
1011 u32 addr7_upper;
1012
1013};
1014
1015struct bufq_addr_element {
1016 u32 addr_low;
1017 u32 addr_high;
1018};
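/*
 * Editorial note (not part of the driver source): struct lrg_buf_q_entry is
 * sixteen u32s, i.e. eight two-word address elements, so
 * QL_ADDR_ELE_PER_BUFQ_ENTRY evaluates to 8 and the totals above work out
 * to NUM_LARGE_BUFFERS = 128 * 8 = 1024 and NUM_SMALL_BUFFERS = 64 * 8 = 512.
 * A compile-time check of that layout assumption:
 */
static inline void ql_check_bufq_layout(void)
{
	BUILD_BUG_ON(sizeof(struct lrg_buf_q_entry) !=
		     8 * sizeof(struct bufq_addr_element));
}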
1019
1020#define QL_NO_RESET 0
1021#define QL_DO_RESET 1
1022
1023enum link_state_t {
1024 LS_UNKNOWN = 0,
1025 LS_DOWN,
1026 LS_DEGRADE,
1027 LS_RECOVER,
1028 LS_UP,
1029};
1030
1031struct ql_rcv_buf_cb {
1032 struct ql_rcv_buf_cb *next;
1033 struct sk_buff *skb;
1034 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1035 DECLARE_PCI_UNMAP_LEN(maplen);
1036 __le32 buf_phy_addr_low;
1037 __le32 buf_phy_addr_high;
1038 int index;
1039};
1040
1041struct ql_tx_buf_cb {
1042 struct sk_buff *skb;
1043 struct ob_mac_iocb_req *queue_entry;
1044 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1045 DECLARE_PCI_UNMAP_LEN(maplen);
1046};
1047
1048/* definitions for type field */
1049#define QL_BUF_TYPE_MACIOCB 0x01
1050#define QL_BUF_TYPE_IPIOCB 0x02
1051#define QL_BUF_TYPE_TCPIOCB 0x03
1052
1053/* qdev->flags definitions. */
1054enum { QL_RESET_DONE = 1, /* Reset finished. */
1055 QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */
1056 QL_RESET_START = 3, /* Please reset the chip. */
1057 QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */
1058 QL_TX_TIMEOUT = 5, /* Timeout in progress. */
1059 QL_LINK_MASTER = 6, /* This driver controls the link. */
1060 QL_ADAPTER_UP = 7, /* Adapter has been brought up. */
1061 QL_THREAD_UP = 8, /* Bit currently unused; available for reuse. */
1062 QL_LINK_UP = 9, /* Link Status. */
1063 QL_ALLOC_REQ_RSP_Q_DONE = 10,
1064 QL_ALLOC_BUFQS_DONE = 11,
1065 QL_ALLOC_SMALL_BUF_DONE = 12,
1066 QL_LINK_OPTICAL = 13,
1067 QL_MSI_ENABLED = 14,
1068};
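/*
 * Editorial note (not part of the driver source): these enumerators are bit
 * numbers, not masks; they are meant for the atomic bitops on the adapter's
 * flags word, e.g. (variable names illustrative):
 *
 *	set_bit(QL_ADAPTER_UP, &qdev->flags);
 *	if (test_bit(QL_LINK_UP, &qdev->flags))
 *		netif_carrier_on(qdev->ndev);
 */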
1069
1070/*
1071 * ql3_adapter - The main Adapter structure definition.
1072 * This structure has all fields relevant to the hardware.
1073 */
1074
1075struct ql3_adapter {
1076 u32 reserved_00;
1077 unsigned long flags;
1078
1079 /* PCI Configuration information for this device */
1080 struct pci_dev *pdev;
1081 struct net_device *ndev; /* Parent NET device */
1082
1083 /* Hardware information */
1084 u8 chip_rev_id;
1085 u8 pci_slot;
1086 u8 pci_width;
1087 u8 pci_x;
1088 u32 msi;
1089 int index;
1090 struct timer_list adapter_timer; /* timer used for various functions */
1091
1092 spinlock_t adapter_lock;
1093 spinlock_t hw_lock;
1094
1095 /* PCI Bus Relative Register Addresses */
1096 u8 *mmap_virt_base; /* stores return value from ioremap() */
1097 struct ql3xxx_port_registers __iomem *mem_map_registers;
1098 u32 current_page; /* tracks current register page */
1099
1100 u32 msg_enable;
1101 u8 reserved_01[2];
1102 u8 reserved_02[2];
1103
1104 /* Page for Shadow Registers */
1105 void *shadow_reg_virt_addr;
1106 dma_addr_t shadow_reg_phy_addr;
1107
1108 /* Net Request Queue */
1109 u32 req_q_size;
1110 u32 reserved_03;
1111 struct ob_mac_iocb_req *req_q_virt_addr;
1112 dma_addr_t req_q_phy_addr;
1113 u16 req_producer_index;
1114 u16 reserved_04;
1115 u16 *preq_consumer_index;
1116 u32 req_consumer_index_phy_addr_high;
1117 u32 req_consumer_index_phy_addr_low;
1118 atomic_t tx_count;
1119 struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
1120
1121 /* Net Response Queue */
1122 u32 rsp_q_size;
1123 u32 eeprom_cmd_data;
1124 struct net_rsp_iocb *rsp_q_virt_addr;
1125 dma_addr_t rsp_q_phy_addr;
1126 struct net_rsp_iocb *rsp_current;
1127 u16 rsp_consumer_index;
1128 u16 reserved_06;
1129 u32 *prsp_producer_index;
1130 u32 rsp_producer_index_phy_addr_high;
1131 u32 rsp_producer_index_phy_addr_low;
1132
1133 /* Large Buffer Queue */
1134 u32 lrg_buf_q_alloc_size;
1135 u32 lrg_buf_q_size;
1136 void *lrg_buf_q_alloc_virt_addr;
1137 void *lrg_buf_q_virt_addr;
1138 dma_addr_t lrg_buf_q_alloc_phy_addr;
1139 dma_addr_t lrg_buf_q_phy_addr;
1140 u32 lrg_buf_q_producer_index;
1141 u32 lrg_buf_release_cnt;
1142 struct bufq_addr_element *lrg_buf_next_free;
1143
1144 /* Large (Receive) Buffers */
1145 struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS];
1146 struct ql_rcv_buf_cb *lrg_buf_free_head;
1147 struct ql_rcv_buf_cb *lrg_buf_free_tail;
1148 u32 lrg_buf_free_count;
1149 u32 lrg_buffer_len;
1150 u32 lrg_buf_index;
1151 u32 lrg_buf_skb_check;
1152
1153 /* Small Buffer Queue */
1154 u32 small_buf_q_alloc_size;
1155 u32 small_buf_q_size;
1156 u32 small_buf_q_producer_index;
1157 void *small_buf_q_alloc_virt_addr;
1158 void *small_buf_q_virt_addr;
1159 dma_addr_t small_buf_q_alloc_phy_addr;
1160 dma_addr_t small_buf_q_phy_addr;
1161 u32 small_buf_index;
1162
1163 /* Small (Receive) Buffers */
1164 void *small_buf_virt_addr;
1165 dma_addr_t small_buf_phy_addr;
1166 u32 small_buf_phy_addr_low;
1167 u32 small_buf_phy_addr_high;
1168 u32 small_buf_release_cnt;
1169 u32 small_buf_total_size;
1170
1171 /* ISR related, saves status for DPC. */
1172 u32 control_status;
1173
1174 struct eeprom_data nvram_data;
1175 struct timer_list ioctl_timer;
1176 u32 port_link_state;
1177 u32 last_rsp_offset;
1178
1179 /* 4022 specific */
1180 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
1181 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
1182 u32 mac_ob_opcode; /* Opcode to use on mac transmission */
1183 u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */
1184 u32 update_ob_opcode; /* Opcode to use for updating NCB */
1185 u32 mb_bit_mask; /* MA Bits mask to use on transmission */
1186 u32 numPorts;
1187 struct net_device_stats stats;
1188 struct workqueue_struct *workqueue;
1189 struct work_struct reset_work;
1190 struct work_struct tx_timeout_work;
1191 u32 max_frame_size;
1192};
1193
1194#endif /* _QLA3XXX_H_ */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 805562b8624e..ebb948bbd589 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2913,7 +2913,7 @@ static struct pci_driver rtl8169_pci_driver = {
 static int __init
 rtl8169_init_module(void)
 {
-	return pci_module_init(&rtl8169_pci_driver);
+	return pci_register_driver(&rtl8169_pci_driver);
 }
 
 static void __exit
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index c3ed734cbe39..31bcdad54716 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1736,7 +1736,7 @@ static struct pci_driver rr_driver = {
 
 static int __init rr_init_module(void)
 {
-	return pci_module_init(&rr_driver);
+	return pci_register_driver(&rr_driver);
 }
 
 static void __exit rr_cleanup_module(void)
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index e1fe3a0a7b0b..c16f9156c98a 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -71,12 +71,13 @@
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/div64.h>
+#include <asm/irq.h>
 
 /* local include */
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.14.2"
+#define DRV_VERSION "2.0.15.2"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -370,38 +371,50 @@ static const u64 fix_mac[] = {
 	END_SIGN
 };
 
+MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+
 /* Module Loadable parameters. */
-static unsigned int tx_fifo_num = 1;
-static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
-    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
-static unsigned int rx_ring_num = 1;
-static unsigned int rx_ring_sz[MAX_RX_RINGS] =
-    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
-static unsigned int rts_frm_len[MAX_RX_RINGS] =
-    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
-static unsigned int rx_ring_mode = 1;
-static unsigned int use_continuous_tx_intrs = 1;
-static unsigned int rmac_pause_time = 0x100;
-static unsigned int mc_pause_threshold_q0q3 = 187;
-static unsigned int mc_pause_threshold_q4q7 = 187;
-static unsigned int shared_splits;
-static unsigned int tmac_util_period = 5;
-static unsigned int rmac_util_period = 5;
-static unsigned int bimodal = 0;
-static unsigned int l3l4hdr_size = 128;
-#ifndef CONFIG_S2IO_NAPI
-static unsigned int indicate_max_pkts;
-#endif
+S2IO_PARM_INT(tx_fifo_num, 1);
+S2IO_PARM_INT(rx_ring_num, 1);
+
+
+S2IO_PARM_INT(rx_ring_mode, 1);
+S2IO_PARM_INT(use_continuous_tx_intrs, 1);
+S2IO_PARM_INT(rmac_pause_time, 0x100);
+S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
+S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
+S2IO_PARM_INT(shared_splits, 0);
+S2IO_PARM_INT(tmac_util_period, 5);
+S2IO_PARM_INT(rmac_util_period, 5);
+S2IO_PARM_INT(bimodal, 0);
+S2IO_PARM_INT(l3l4hdr_size, 128);
 /* Frequency of Rx desc syncs expressed as power of 2 */
-static unsigned int rxsync_frequency = 3;
+S2IO_PARM_INT(rxsync_frequency, 3);
 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
-static unsigned int intr_type = 0;
+S2IO_PARM_INT(intr_type, 0);
 /* Large receive offload feature */
-static unsigned int lro = 0;
+S2IO_PARM_INT(lro, 0);
 /* Max pkts to be aggregated by LRO at one time. If not specified,
  * aggregation happens until we hit max IP pkt size(64K)
  */
-static unsigned int lro_max_pkts = 0xFFFF;
+S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
+#ifndef CONFIG_S2IO_NAPI
+S2IO_PARM_INT(indicate_max_pkts, 0);
+#endif
+
+static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
+    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
+static unsigned int rx_ring_sz[MAX_RX_RINGS] =
+    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
+static unsigned int rts_frm_len[MAX_RX_RINGS] =
+    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+
+module_param_array(tx_fifo_len, uint, NULL, 0);
+module_param_array(rx_ring_sz, uint, NULL, 0);
+module_param_array(rts_frm_len, uint, NULL, 0);
 
 /*
  * S2IO device table.
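[Editor's note] The S2IO_PARM_INT() helper used in the hunk above is introduced by this patch in s2io.h, which is not shown here. A plausible expansion, for readers following the conversion, is a static declaration plus module_param() registration; the exact body below is an assumption, not quoted from the commit:

#define S2IO_PARM_INT(X, def_val) \
	static unsigned int X = def_val; \
	module_param(X, uint, 0);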
@@ -464,10 +477,9 @@ static int init_shared_mem(struct s2io_nic *nic)
 		size += config->tx_cfg[i].fifo_len;
 	}
 	if (size > MAX_AVAILABLE_TXDS) {
-		DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
-			  __FUNCTION__);
+		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
 		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
-		return FAILURE;
+		return -EINVAL;
 	}
 
@@ -547,6 +559,7 @@ static int init_shared_mem(struct s2io_nic *nic)
 	nic->ufo_in_band_v = kmalloc((sizeof(u64) * size), GFP_KERNEL);
 	if (!nic->ufo_in_band_v)
 		return -ENOMEM;
+	memset(nic->ufo_in_band_v, 0, size);
 
 	/* Allocation and initialization of RXDs in Rings */
 	size = 0;
@@ -1213,7 +1226,7 @@ static int init_nic(struct s2io_nic *nic)
 		break;
 	}
 
-	/* Enable Tx FIFO partition 0. */
+	/* Enable all configured Tx FIFO partitions */
 	val64 = readq(&bar0->tx_fifo_partition_0);
 	val64 |= (TX_FIFO_PARTITION_EN);
 	writeq(val64, &bar0->tx_fifo_partition_0);
@@ -1650,7 +1663,7 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
 		writeq(temp64, &bar0->general_int_mask);
 		/*
 		 * If Hercules adapter enable GPIO otherwise
-		 * disabled all PCIX, Flash, MDIO, IIC and GPIO
+		 * disable all PCIX, Flash, MDIO, IIC and GPIO
 		 * interrupts for now.
 		 * TODO
 		 */
@@ -2119,7 +2132,7 @@ static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, in
 				       frag->size, PCI_DMA_TODEVICE);
 		}
 	}
-	txdlp->Host_Control = 0;
+	memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds));
 	return(skb);
 }
 
@@ -2371,9 +2384,14 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 			skb->data = (void *) (unsigned long)tmp;
 			skb->tail = (void *) (unsigned long)tmp;
 
-			((RxD3_t*)rxdp)->Buffer0_ptr =
-			    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
+			if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
+				((RxD3_t*)rxdp)->Buffer0_ptr =
+				    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
 					   PCI_DMA_FROMDEVICE);
+			else
+				pci_dma_sync_single_for_device(nic->pdev,
+				    (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
+				    BUF0_LEN, PCI_DMA_FROMDEVICE);
 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
 			if (nic->rxd_mode == RXD_MODE_3B) {
 				/* Two buffer mode */
@@ -2386,10 +2404,13 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 				    (nic->pdev, skb->data, dev->mtu + 4,
 				     PCI_DMA_FROMDEVICE);
 
-				/* Buffer-1 will be dummy buffer not used */
-				((RxD3_t*)rxdp)->Buffer1_ptr =
-				    pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
-						   PCI_DMA_FROMDEVICE);
+				/* Buffer-1 will be dummy buffer. Not used */
+				if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
+					((RxD3_t*)rxdp)->Buffer1_ptr =
+						pci_map_single(nic->pdev,
+							ba->ba_1, BUF1_LEN,
+							PCI_DMA_FROMDEVICE);
+				}
 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
 					(dev->mtu + 4);
@@ -2614,23 +2635,23 @@ no_rx:
 }
 #endif
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
 /**
- * s2io_netpoll - Rx interrupt service handler for netpoll support
+ * s2io_netpoll - netpoll event handler entry point
  * @dev : pointer to the device structure.
  * Description:
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
+ * This function will be called by upper layer to check for events on the
+ * interface in situations where interrupts are disabled. It is used for
+ * specific in-kernel networking tasks, such as remote consoles and kernel
+ * debugging over the network (example netdump in RedHat).
  */
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
 static void s2io_netpoll(struct net_device *dev)
 {
 	nic_t *nic = dev->priv;
 	mac_info_t *mac_control;
 	struct config_param *config;
 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
-	u64 val64;
+	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
 	int i;
 
 	disable_irq(dev->irq);
@@ -2639,9 +2660,17 @@ static void s2io_netpoll(struct net_device *dev)
 	mac_control = &nic->mac_control;
 	config = &nic->config;
 
-	val64 = readq(&bar0->rx_traffic_int);
 	writeq(val64, &bar0->rx_traffic_int);
+	writeq(val64, &bar0->tx_traffic_int);
 
+	/* we need to free up the transmitted skbufs or else netpoll will
+	 * run out of skbs and will fail and eventually netpoll application such
+	 * as netdump will fail.
+	 */
+	for (i = 0; i < config->tx_fifo_num; i++)
+		tx_intr_handler(&mac_control->fifos[i]);
+
+	/* check for received packet and indicate up to network */
 	for (i = 0; i < config->rx_ring_num; i++)
 		rx_intr_handler(&mac_control->rings[i]);
 
@@ -2708,7 +2737,7 @@ static void rx_intr_handler(ring_info_t *ring_data)
 		/* If your are next to put index then it's FIFO full condition */
 		if ((get_block == put_block) &&
 		    (get_info.offset + 1) == put_info.offset) {
-			DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name);
+			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
 			break;
 		}
 		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
@@ -2728,18 +2757,15 @@ static void rx_intr_handler(ring_info_t *ring_data)
 					HEADER_SNAP_SIZE,
 					PCI_DMA_FROMDEVICE);
 		} else if (nic->rxd_mode == RXD_MODE_3B) {
-			pci_unmap_single(nic->pdev, (dma_addr_t)
+			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
 					 ((RxD3_t*)rxdp)->Buffer0_ptr,
 					 BUF0_LEN, PCI_DMA_FROMDEVICE);
 			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 ((RxD3_t*)rxdp)->Buffer1_ptr,
-					 BUF1_LEN, PCI_DMA_FROMDEVICE);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
 					 ((RxD3_t*)rxdp)->Buffer2_ptr,
 					 dev->mtu + 4,
 					 PCI_DMA_FROMDEVICE);
 		} else {
-			pci_unmap_single(nic->pdev, (dma_addr_t)
+			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
 					 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
 					 PCI_DMA_FROMDEVICE);
 			pci_unmap_single(nic->pdev, (dma_addr_t)
@@ -3327,7 +3353,7 @@ static void s2io_reset(nic_t * sp)
 
 	/* Clear certain PCI/PCI-X fields after reset */
 	if (sp->device_type == XFRAME_II_DEVICE) {
-		/* Clear parity err detect bit */
+		/* Clear "detected parity error" bit */
 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
 
 		/* Clearing PCIX Ecc status register */
@@ -3528,7 +3554,7 @@ static void restore_xmsi_data(nic_t *nic)
 	u64 val64;
 	int i;
 
-	for (i=0; i< nic->avail_msix_vectors; i++) {
+	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
 		val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3547,7 +3573,7 @@ static void store_xmsi_data(nic_t *nic)
 	int i;
 
 	/* Store and display */
-	for (i=0; i< nic->avail_msix_vectors; i++) {
+	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
 		val64 = (BIT(15) | vBIT(i, 26, 6));
 		writeq(val64, &bar0->xmsi_access);
 		if (wait_for_msix_trans(nic, i)) {
@@ -3808,13 +3834,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	TxD_t *txdp;
 	TxFIFO_element_t __iomem *tx_fifo;
 	unsigned long flags;
-#ifdef NETIF_F_TSO
-	int mss;
-#endif
 	u16 vlan_tag = 0;
 	int vlan_priority = 0;
 	mac_info_t *mac_control;
 	struct config_param *config;
+	int offload_type;
 
 	mac_control = &sp->mac_control;
 	config = &sp->config;
@@ -3862,13 +3886,11 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		return 0;
 	}
 
-	txdp->Control_1 = 0;
-	txdp->Control_2 = 0;
+	offload_type = s2io_offload_type(skb);
 #ifdef NETIF_F_TSO
-	mss = skb_shinfo(skb)->gso_size;
-	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
 		txdp->Control_1 |= TXD_TCP_LSO_EN;
-		txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
+		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
 	}
 #endif
 	if (skb->ip_summed == CHECKSUM_HW) {
@@ -3886,10 +3908,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	frg_len = skb->len - skb->data_len;
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP) {
+	if (offload_type == SKB_GSO_UDP) {
 		int ufo_size;
 
-		ufo_size = skb_shinfo(skb)->gso_size;
+		ufo_size = s2io_udp_mss(skb);
 		ufo_size &= ~7;
 		txdp->Control_1 |= TXD_UFO_EN;
 		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
@@ -3906,16 +3928,13 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 					   sp->ufo_in_band_v,
 					   sizeof(u64), PCI_DMA_TODEVICE);
 		txdp++;
-		txdp->Control_1 = 0;
-		txdp->Control_2 = 0;
 	}
 
 	txdp->Buffer_Pointer = pci_map_single
 	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
 	txdp->Host_Control = (unsigned long) skb;
 	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
-
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+	if (offload_type == SKB_GSO_UDP)
 		txdp->Control_1 |= TXD_UFO_EN;
 
 	frg_cnt = skb_shinfo(skb)->nr_frags;
@@ -3930,12 +3949,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 		    (sp->pdev, frag->page, frag->page_offset,
 		     frag->size, PCI_DMA_TODEVICE);
 		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
-		if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+		if (offload_type == SKB_GSO_UDP)
 			txdp->Control_1 |= TXD_UFO_EN;
 	}
 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
 
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+	if (offload_type == SKB_GSO_UDP)
 		frg_cnt++; /* as Txd0 was used for inband header */
 
 	tx_fifo = mac_control->tx_FIFO_start[queue];
@@ -3944,13 +3963,9 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
 		 TX_FIFO_LAST_LIST);
-
-#ifdef NETIF_F_TSO
-	if (mss)
-		val64 |= TX_FIFO_SPECIAL_FUNC;
-#endif
-	if (skb_shinfo(skb)->gso_type == SKB_GSO_UDP)
+	if (offload_type)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
+
 	writeq(val64, &tx_fifo->List_Control);
 
 	mmiowb();
@@ -3984,13 +3999,41 @@ s2io_alarm_handle(unsigned long data)
 	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 }
 
+static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
+{
+	int rxb_size, level;
+
+	if (!sp->lro) {
+		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
+		level = rx_buffer_level(sp, rxb_size, rng_n);
+
+		if ((level == PANIC) && (!TASKLET_IN_USE)) {
+			int ret;
+			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
+			DBG_PRINT(INTR_DBG, "PANIC levels\n");
+			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
+				DBG_PRINT(ERR_DBG, "Out of memory in %s",
+					  __FUNCTION__);
+				clear_bit(0, (&sp->tasklet_status));
+				return -1;
+			}
+			clear_bit(0, (&sp->tasklet_status));
+		} else if (level == LOW)
+			tasklet_schedule(&sp->task);
+
+	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
+			DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
+			DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
+	}
+	return 0;
+}
+
 static irqreturn_t
 s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
 	nic_t *sp = dev->priv;
 	int i;
-	int ret;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
@@ -4012,35 +4055,8 @@ s2io_msi_handle(int irq, void *dev_id, struct pt_regs *regs)
 	 * reallocate the buffers from the interrupt handler itself,
 	 * else schedule a tasklet to reallocate the buffers.
 	 */
-	for (i = 0; i < config->rx_ring_num; i++) {
-		if (!sp->lro) {
-			int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
-			int level = rx_buffer_level(sp, rxb_size, i);
-
-			if ((level == PANIC) && (!TASKLET_IN_USE)) {
-				DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
-					  dev->name);
-				DBG_PRINT(INTR_DBG, "PANIC levels\n");
-				if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
-					DBG_PRINT(ERR_DBG, "%s:Out of memory",
-						  dev->name);
-					DBG_PRINT(ERR_DBG, " in ISR!!\n");
-					clear_bit(0, (&sp->tasklet_status));
-					atomic_dec(&sp->isr_cnt);
-					return IRQ_HANDLED;
-				}
-				clear_bit(0, (&sp->tasklet_status));
-			} else if (level == LOW) {
-				tasklet_schedule(&sp->task);
-			}
-		}
-		else if (fill_rx_buffers(sp, i) == -ENOMEM) {
-			DBG_PRINT(ERR_DBG, "%s:Out of memory",
-				  dev->name);
-			DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
-			break;
-		}
-	}
+	for (i = 0; i < config->rx_ring_num; i++)
+		s2io_chk_rx_buffers(sp, i);
 
 	atomic_dec(&sp->isr_cnt);
 	return IRQ_HANDLED;
@@ -4051,39 +4067,13 @@ s2io_msix_ring_handle(int irq, void *dev_id, struct pt_regs *regs)
 {
 	ring_info_t *ring = (ring_info_t *)dev_id;
 	nic_t *sp = ring->nic;
-	struct net_device *dev = (struct net_device *) dev_id;
-	int rxb_size, level, rng_n;
 
 	atomic_inc(&sp->isr_cnt);
-	rx_intr_handler(ring);
-
-	rng_n = ring->ring_no;
-	if (!sp->lro) {
-		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
-		level = rx_buffer_level(sp, rxb_size, rng_n);
 
-		if ((level == PANIC) && (!TASKLET_IN_USE)) {
-			int ret;
-			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
-			DBG_PRINT(INTR_DBG, "PANIC levels\n");
-			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
-				DBG_PRINT(ERR_DBG, "Out of memory in %s",
-					  __FUNCTION__);
-				clear_bit(0, (&sp->tasklet_status));
-				return IRQ_HANDLED;
-			}
-			clear_bit(0, (&sp->tasklet_status));
-		} else if (level == LOW) {
-			tasklet_schedule(&sp->task);
-		}
-	}
-	else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
-		DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
-		DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
-	}
+	rx_intr_handler(ring);
+	s2io_chk_rx_buffers(sp, ring->ring_no);
 
 	atomic_dec(&sp->isr_cnt);
-
 	return IRQ_HANDLED;
 }
 
@@ -4248,37 +4238,8 @@ static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
 	 * else schedule a tasklet to reallocate the buffers.
 	 */
 #ifndef CONFIG_S2IO_NAPI
-	for (i = 0; i < config->rx_ring_num; i++) {
-		if (!sp->lro) {
-			int ret;
-			int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
-			int level = rx_buffer_level(sp, rxb_size, i);
-
-			if ((level == PANIC) && (!TASKLET_IN_USE)) {
-				DBG_PRINT(INTR_DBG, "%s: Rx BD hit ",
-					  dev->name);
-				DBG_PRINT(INTR_DBG, "PANIC levels\n");
-				if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
-					DBG_PRINT(ERR_DBG, "%s:Out of memory",
-						  dev->name);
-					DBG_PRINT(ERR_DBG, " in ISR!!\n");
-					clear_bit(0, (&sp->tasklet_status));
-					atomic_dec(&sp->isr_cnt);
-					writeq(org_mask, &bar0->general_int_mask);
-					return IRQ_HANDLED;
-				}
-				clear_bit(0, (&sp->tasklet_status));
-			} else if (level == LOW) {
-				tasklet_schedule(&sp->task);
-			}
-		}
-		else if (fill_rx_buffers(sp, i) == -ENOMEM) {
-			DBG_PRINT(ERR_DBG, "%s:Out of memory",
-				  dev->name);
-			DBG_PRINT(ERR_DBG, " in Rx intr!!\n");
-			break;
-		}
-	}
+	for (i = 0; i < config->rx_ring_num; i++)
+		s2io_chk_rx_buffers(sp, i);
 #endif
 	writeq(org_mask, &bar0->general_int_mask);
 	atomic_dec(&sp->isr_cnt);
@@ -4308,6 +4269,8 @@ static void s2io_updt_stats(nic_t *sp)
 			if (cnt == 5)
 				break; /* Updt failed */
 		} while(1);
+	} else {
+		memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
 	}
 }
 
@@ -4942,7 +4905,8 @@ static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
 }
 static void s2io_vpd_read(nic_t *nic)
 {
-	u8 vpd_data[256],data;
+	u8 *vpd_data;
+	u8 data;
 	int i=0, cnt, fail = 0;
 	int vpd_addr = 0x80;
 
@@ -4955,6 +4919,10 @@ static void s2io_vpd_read(nic_t *nic)
 		vpd_addr = 0x50;
 	}
 
+	vpd_data = kmalloc(256, GFP_KERNEL);
+	if (!vpd_data)
+		return;
+
 	for (i = 0; i < 256; i +=4 ) {
 		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
 		pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
@@ -4977,6 +4945,7 @@ static void s2io_vpd_read(nic_t *nic)
 		memset(nic->product_name, 0, vpd_data[1]);
 		memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
 	}
+	kfree(vpd_data);
 }
 
 /**
@@ -5295,7 +5264,7 @@ static int s2io_link_test(nic_t * sp, uint64_t * data)
 	else
 		*data = 0;
 
-	return 0;
+	return *data;
 }
 
 /**
@@ -5753,6 +5722,19 @@ static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
 	return 0;
 }
 
+static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
+{
+	return (dev->features & NETIF_F_TSO) != 0;
+}
+static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+	if (data)
+		dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+	else
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+	return 0;
+}
 
 static struct ethtool_ops netdev_ethtool_ops = {
 	.get_settings = s2io_ethtool_gset,
@@ -5773,8 +5755,8 @@ static struct ethtool_ops netdev_ethtool_ops = {
5773 .get_sg = ethtool_op_get_sg, 5755 .get_sg = ethtool_op_get_sg,
5774 .set_sg = ethtool_op_set_sg, 5756 .set_sg = ethtool_op_set_sg,
5775#ifdef NETIF_F_TSO 5757#ifdef NETIF_F_TSO
5776 .get_tso = ethtool_op_get_tso, 5758 .get_tso = s2io_ethtool_op_get_tso,
5777 .set_tso = ethtool_op_set_tso, 5759 .set_tso = s2io_ethtool_op_set_tso,
5778#endif 5760#endif
5779 .get_ufo = ethtool_op_get_ufo, 5761 .get_ufo = ethtool_op_get_ufo,
5780 .set_ufo = ethtool_op_set_ufo, 5762 .set_ufo = ethtool_op_set_ufo,
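
s2io now supplies its own get/set TSO ops instead of the generic helpers because it wants NETIF_F_TSO6 toggled together with NETIF_F_TSO. For comparison, the generic helper of this era only touches the single flag — roughly (a sketch, not the exact net/core/ethtool.c source):

	int ethtool_op_set_tso(struct net_device *dev, u32 data)
	{
		if (data)
			dev->features |= NETIF_F_TSO;	/* IPv4 TSO only */
		else
			dev->features &= ~NETIF_F_TSO;
		return 0;
	}
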
@@ -6337,7 +6319,7 @@ static int s2io_card_up(nic_t * sp)
 	s2io_set_multicast(dev);
 
 	if (sp->lro) {
-		/* Initialize max aggregatable pkts based on MTU */
+		/* Initialize max aggregatable pkts per session based on MTU */
 		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
 		/* Check if we can use(if specified) user provided value */
 		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
@@ -6438,7 +6420,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
 * @cksum : FCS checksum of the frame.
 * @ring_no : the ring from which this RxD was extracted.
 * Description:
- * This function is called by the Tx interrupt serivce routine to perform
+ * This function is called by the Rx interrupt serivce routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
@@ -6698,33 +6680,6 @@ static void s2io_init_pci(nic_t * sp)
 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
 }
 
-MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_param(tx_fifo_num, int, 0);
-module_param(rx_ring_num, int, 0);
-module_param(rx_ring_mode, int, 0);
-module_param_array(tx_fifo_len, uint, NULL, 0);
-module_param_array(rx_ring_sz, uint, NULL, 0);
-module_param_array(rts_frm_len, uint, NULL, 0);
-module_param(use_continuous_tx_intrs, int, 1);
-module_param(rmac_pause_time, int, 0);
-module_param(mc_pause_threshold_q0q3, int, 0);
-module_param(mc_pause_threshold_q4q7, int, 0);
-module_param(shared_splits, int, 0);
-module_param(tmac_util_period, int, 0);
-module_param(rmac_util_period, int, 0);
-module_param(bimodal, bool, 0);
-module_param(l3l4hdr_size, int , 0);
-#ifndef CONFIG_S2IO_NAPI
-module_param(indicate_max_pkts, int, 0);
-#endif
-module_param(rxsync_frequency, int, 0);
-module_param(intr_type, int, 0);
-module_param(lro, int, 0);
-module_param(lro_max_pkts, int, 0);
-
 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
 {
 	if ( tx_fifo_num > 8) {
@@ -6832,8 +6787,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	}
 	if (dev_intr_type != MSI_X) {
 		if (pci_request_regions(pdev, s2io_driver_name)) {
-			DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
+			DBG_PRINT(ERR_DBG, "Request Regions failed\n");
 			pci_disable_device(pdev);
 			return -ENODEV;
 		}
 	}
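
The Request-Regions hunk is a one-character fix: the DBG_PRINT() call ended in a comma, so it and pci_disable_device() compiled as a single comma-expression statement. Inside braces that was harmless, but the same slip without braces silently changes control flow; a sketch (cleanup() is a hypothetical helper):

	void example(int err)
	{
		if (err)
			printk(KERN_ERR "failed\n"),	/* comma, not semicolon */
			cleanup();	/* both calls form ONE statement, so
					 * cleanup() runs only when err != 0 --
					 * probably not what the author meant */
	}
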
@@ -6957,7 +6912,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	/* initialize the shared memory used by the NIC and the host */
 	if (init_shared_mem(sp)) {
 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
-			  __FUNCTION__);
+			  dev->name);
 		ret = -ENOMEM;
 		goto mem_alloc_failed;
 	}
@@ -7094,6 +7049,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 	dev->addr_len = ETH_ALEN;
 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
 
+	/* reset Nic and bring it to known state */
+	s2io_reset(sp);
+
 	/*
 	 * Initialize the tasklet status and link state flags
 	 * and the card state parameter
@@ -7131,11 +7089,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 		goto register_failed;
 	}
 	s2io_vpd_read(sp);
-	DBG_PRINT(ERR_DBG, "%s: Neterion %s",dev->name, sp->product_name);
-	DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
-		  get_xena_rev_id(sp->pdev),
-		  s2io_driver_version);
 	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
+	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
+		  sp->product_name, get_xena_rev_id(sp->pdev));
+	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
+		  s2io_driver_version);
 	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
 		  "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
 		  sp->def_mac_addr[0].mac_addr[0],
@@ -7275,7 +7233,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
 
 int __init s2io_starter(void)
 {
-	return pci_module_init(&s2io_driver);
+	return pci_register_driver(&s2io_driver);
 }
 
 /**
@@ -7436,8 +7394,13 @@ static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
 	if (ip->ihl != 5) /* IP has options */
 		return -1;
 
+	/* If we see CE codepoint in IP header, packet is not mergeable */
+	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
+		return -1;
+
+	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
 	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
-	    !tcp->ack) {
+	    tcp->ece || tcp->cwr || !tcp->ack) {
 		/*
 		 * Currently recognize only the ack control word and
 		 * any other control field being set would result in
@@ -7591,18 +7554,16 @@ static void queue_rx_frame(struct sk_buff *skb)
 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
 			   u32 tcp_len)
 {
-	struct sk_buff *tmp, *first = lro->parent;
+	struct sk_buff *first = lro->parent;
 
 	first->len += tcp_len;
 	first->data_len = lro->frags_len;
 	skb_pull(skb, (skb->len - tcp_len));
-	if ((tmp = skb_shinfo(first)->frag_list)) {
-		while (tmp->next)
-			tmp = tmp->next;
-		tmp->next = skb;
-	}
+	if (skb_shinfo(first)->frag_list)
+		lro->last_frag->next = skb;
 	else
 		skb_shinfo(first)->frag_list = skb;
+	lro->last_frag = skb;
 	sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
 	return;
 }
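
The lro_append_pkt() rewrite drops the O(n) walk down frag_list on every merged segment in favour of a cached tail pointer (the new lro->last_frag field added to s2io.h below). The generic pattern, sketched with stand-in types:

	struct node { struct node *next; };
	struct list { struct node *head, *tail; };

	static void list_append(struct list *l, struct node *n)
	{
		n->next = NULL;
		if (l->head)
			l->tail->next = n;	/* O(1), no while (tmp->next) loop */
		else
			l->head = n;
		l->tail = n;			/* keep the cached tail current */
	}
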
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 217097bc22f1..5ed49c3be1e9 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -719,6 +719,7 @@ struct msix_info_st {
 /* Data structure to represent a LRO session */
 typedef struct lro {
 	struct sk_buff	*parent;
+	struct sk_buff	*last_frag;
 	u8		*l2h;
 	struct iphdr	*iph;
 	struct tcphdr	*tcph;
@@ -1011,4 +1012,13 @@ static void clear_lro_session(lro_t *lro);
 static void queue_rx_frame(struct sk_buff *skb);
 static void update_L3L4_header(nic_t *sp, lro_t *lro);
 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb, u32 tcp_len);
+
+#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
+#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
+#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
+
+#define S2IO_PARM_INT(X, def_val) \
+	static unsigned int X = def_val;\
+	module_param(X , uint, 0);
+
 #endif /* _S2IO_H */
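
S2IO_PARM_INT packages a parameter declaration and its module_param() registration into one line, replacing the block of bare module_param() calls deleted from s2io.c above. A usage sketch (the parameter name comes from the removed list; the default value is illustrative):

	S2IO_PARM_INT(lro, 0);
	/* expands to:
	 *	static unsigned int lro = 0;
	 *	module_param(lro, uint, 0);
	 */
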
diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c
index b2acedbefa8f..c479b07be788 100644
--- a/drivers/net/saa9730.c
+++ b/drivers/net/saa9730.c
@@ -1131,7 +1131,7 @@ static struct pci_driver saa9730_driver = {
 
 static int __init saa9730_init(void)
 {
-	return pci_module_init(&saa9730_driver);
+	return pci_register_driver(&saa9730_driver);
 }
 
 static void __exit saa9730_cleanup(void)
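
This is one of several mechanical pci_module_init() -> pci_register_driver() conversions in this series (s2io above; sis190, sis900, sk98lin, skfp and skge below). By this point pci_module_init() was already a deprecated thin wrapper, roughly equivalent to:

	/* approximate definition from <linux/pci.h> of this era */
	static inline int pci_module_init(struct pci_driver *drv)
	{
		return pci_register_driver(drv);
	}
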
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 9ab1618e82a4..e4c8896b76cb 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2708,7 +2708,6 @@ static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
 static void sbmac_set_rx_mode(struct net_device *dev)
 {
 	unsigned long flags;
-	int msg_flag = 0;
 	struct sbmac_softc *sc = netdev_priv(dev);
 
 	spin_lock_irqsave(&sc->sbm_lock, flags);
@@ -2718,22 +2717,14 @@ static void sbmac_set_rx_mode(struct net_device *dev)
 	 */
 
 		if (dev->flags & IFF_PROMISC) {
-			/* Unconditionally log net taps. */
-			msg_flag = 1;
 			sbmac_promiscuous_mode(sc,1);
 		}
 		else {
-			msg_flag = 2;
 			sbmac_promiscuous_mode(sc,0);
 		}
 	}
 	spin_unlock_irqrestore(&sc->sbm_lock, flags);
 
-	if (msg_flag) {
-		printk(KERN_NOTICE "%s: Promiscuous mode %sabled.\n",
-		       dev->name,(msg_flag==1)?"en":"dis");
-	}
-
 	/*
 	 * Program the multicasts. Do this every time.
 	 */
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index efd0f235020f..01392bca0223 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -742,7 +742,7 @@ module_param(irq, int, 0);
 MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
 MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
 
-int init_module(void)
+int __init init_module(void)
 {
 	dev_seeq = seeq8005_probe(-1);
 	if (IS_ERR(dev_seeq))
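
Tagging init_module() with __init places it in the .init.text section so the kernel can discard it once loading completes. The modern spelling pairs __init/__exit with module_init()/module_exit(); a minimal sketch:

	static int __init example_init(void)
	{
		return 0;	/* probe hardware, register devices, ... */
	}

	static void __exit example_exit(void)
	{
		/* unregister, free resources */
	}

	module_init(example_init);
	module_exit(example_exit);
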
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index df0cbebb3277..7c1982af2a44 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -821,9 +821,6 @@ static void sis190_set_rx_mode(struct net_device *dev)
 	u16 rx_mode;
 
 	if (dev->flags & IFF_PROMISC) {
-		/* Unconditionally log net taps. */
-		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
-			dev->name);
 		rx_mode =
 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
 			AcceptAllPhys;
@@ -1871,7 +1868,7 @@ static struct pci_driver sis190_pci_driver = {
 
 static int __init sis190_init_module(void)
 {
-	return pci_module_init(&sis190_pci_driver);
+	return pci_register_driver(&sis190_pci_driver);
 }
 
 static void __exit sis190_cleanup_module(void)
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 29ee7ffedfff..6af50286349d 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -134,6 +134,7 @@ static const struct mii_chip_info {
 	{ "AMD 79C901 10BASE-T PHY",  0x0000, 0x6B70, LAN },
 	{ "AMD 79C901 HomePNA PHY",   0x0000, 0x6B90, HOME},
 	{ "ICS LAN PHY",              0x0015, 0xF440, LAN },
+	{ "ICS LAN PHY",              0x0143, 0xBC70, LAN },
 	{ "NS 83851 PHY",             0x2000, 0x5C20, MIX },
 	{ "NS 83847 PHY",             0x2000, 0x5C30, MIX },
 	{ "Realtek RTL8201 PHY",      0x0000, 0x8200, LAN },
@@ -2495,7 +2496,7 @@ static int __init sis900_init_module(void)
 	printk(version);
 #endif
 
-	return pci_module_init(&sis900_pci_driver);
+	return pci_register_driver(&sis900_pci_driver);
 }
 
 static void __exit sis900_cleanup_module(void)
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
index ee62845d3ac9..49e76c7f10da 100644
--- a/drivers/net/sk98lin/skge.c
+++ b/drivers/net/sk98lin/skge.c
@@ -5133,7 +5133,7 @@ static struct pci_driver skge_driver = {
 
 static int __init skge_init(void)
 {
-	return pci_module_init(&skge_driver);
+	return pci_register_driver(&skge_driver);
 }
 
 static void __exit skge_exit(void)
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index b5714a60237d..8e4d18440a56 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -2280,7 +2280,7 @@ static struct pci_driver skfddi_pci_driver = {
 
 static int __init skfd_init(void)
 {
-	return pci_module_init(&skfddi_pci_driver);
+	return pci_register_driver(&skfddi_pci_driver);
 }
 
 static void __exit skfd_exit(void)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 82200bfaa8ed..fba8b7455d8b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,7 +43,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.6"
+#define DRV_VERSION		"1.8"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
 static int skge_up(struct net_device *dev);
 static int skge_down(struct net_device *dev);
 static void skge_phy_reset(struct skge_port *skge);
-static void skge_tx_clean(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static void genesis_get_stats(struct skge_port *skge, u64 *data);
@@ -105,6 +105,7 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
 
 static int skge_get_regs_len(struct net_device *dev)
 {
@@ -516,10 +517,7 @@ static int skge_set_pauseparam(struct net_device *dev,
 /* Chip internal frequency for clock calculations */
 static inline u32 hwkhz(const struct skge_hw *hw)
 {
-	if (hw->chip_id == CHIP_ID_GENESIS)
-		return 53215; /* or:  53.125 MHz */
-	else
-		return 78215; /* or:  78.125 MHz */
+	return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
 }
 
 /* Chip HZ to microseconds */
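
The old constants were digit transpositions: the function returns kHz, and 53.125 MHz is 53125 kHz (Genesis), 78.125 MHz is 78125 kHz (Yukon) — exactly what the original comments said. Every timer conversion built on hwkhz() was therefore skewed by a fraction of a percent. A sketch of the downstream use, along the lines of the driver's own conversion helper:

	/* microseconds -> chip clock ticks: kHz * us / 1000 */
	static inline u32 example_usecs2clk(const struct skge_hw *hw, u32 usec)
	{
		return hwkhz(hw) * usec / 1000;
	}
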
@@ -821,8 +819,9 @@ static void skge_rx_clean(struct skge_port *skge)
 /* Allocate buffers for receive ring
  * For receive:  to_clean is next received frame.
  */
-static int skge_rx_fill(struct skge_port *skge)
+static int skge_rx_fill(struct net_device *dev)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
 
@@ -830,7 +829,8 @@ static int skge_rx_fill(struct net_device *dev)
 	do {
 		struct sk_buff *skb;
 
-		skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
+		skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+					 GFP_KERNEL);
 		if (!skb)
 			return -ENOMEM;
 
@@ -2181,7 +2181,7 @@ static int skge_up(struct net_device *dev)
 	if (err)
 		goto free_pci_mem;
 
-	err = skge_rx_fill(skge);
+	err = skge_rx_fill(dev);
 	if (err)
 		goto free_rx_ring;
 
@@ -2214,6 +2214,7 @@ static int skge_up(struct net_device *dev)
 	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
 	skge_led(skge, LED_MODE_ON);
 
+	netif_poll_enable(dev);
 	return 0;
 
  free_rx_ring:
@@ -2282,7 +2283,8 @@ static int skge_down(struct net_device *dev)
 
 	skge_led(skge, LED_MODE_OFF);
 
-	skge_tx_clean(skge);
+	netif_poll_disable(dev);
+	skge_tx_clean(dev);
 	skge_rx_clean(skge);
 
 	kfree(skge->rx_ring.start);
@@ -2307,25 +2309,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	int i;
 	u32 control, len;
 	u64 map;
-	unsigned long flags;
 
 	if (skb_padto(skb, ETH_ZLEN))
 		return NETDEV_TX_OK;
 
-	if (!spin_trylock_irqsave(&skge->tx_lock, flags))
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-
-	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
-		if (!netif_queue_stopped(dev)) {
-			netif_stop_queue(dev);
-
-			printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
-			       dev->name);
-		}
-		spin_unlock_irqrestore(&skge->tx_lock, flags);
+	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
 		return NETDEV_TX_BUSY;
-	}
 
 	e = skge->tx_ring.to_use;
 	td = e->desc;
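
This hunk is part of removing NETIF_F_LLTX from skge (the flag itself goes away in the skge_devinit() hunk further down). Without LLTX the core serializes hard_start_xmit() under the device's own tx lock, so the driver-private skge->tx_lock, the trylock, and the NETDEV_TX_LOCKED requeue path all become unnecessary. Roughly what the core does around the driver's xmit — a sketch of the 2.6.18-era pattern, not verbatim net/core source:

	static int example_dev_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		int rc;

		netif_tx_lock(dev);		/* core-owned xmit serialization */
		rc = dev->hard_start_xmit(skb, dev);
		netif_tx_unlock(dev);
		return rc;
	}
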
@@ -2400,8 +2389,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irqrestore(&skge->tx_lock, flags);
-
 	dev->trans_start = jiffies;
 
 	return NETDEV_TX_OK;
@@ -2431,18 +2418,18 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
 			printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
 			       skge->netdev->name, e - skge->tx_ring.start);
 
-		dev_kfree_skb_any(e->skb);
+		dev_kfree_skb(e->skb);
 	}
 	e->skb = NULL;
 }
 
 /* Free all buffers in transmit ring */
-static void skge_tx_clean(struct skge_port *skge)
+static void skge_tx_clean(struct net_device *dev)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct skge_element *e;
-	unsigned long flags;
 
-	spin_lock_irqsave(&skge->tx_lock, flags);
+	netif_tx_lock_bh(dev);
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 		skge_tx_free(skge, e, td->control);
@@ -2450,8 +2437,8 @@ static void skge_tx_clean(struct net_device *dev)
 	}
 
 	skge->tx_ring.to_clean = e;
-	netif_wake_queue(skge->netdev);
-	spin_unlock_irqrestore(&skge->tx_lock, flags);
+	netif_wake_queue(dev);
+	netif_tx_unlock_bh(dev);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
@@ -2462,7 +2449,7 @@ static void skge_tx_timeout(struct net_device *dev)
 		printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
 
 	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
-	skge_tx_clean(skge);
+	skge_tx_clean(dev);
 }
 
 static int skge_change_mtu(struct net_device *dev, int new_mtu)
@@ -2585,16 +2572,17 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 /* Get receive buffer from descriptor.
  * Handles copy of small buffers and reallocation failures
  */
-static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
-					  struct skge_element *e,
-					  u32 control, u32 status, u16 csum)
+static struct sk_buff *skge_rx_get(struct net_device *dev,
+				   struct skge_element *e,
+				   u32 control, u32 status, u16 csum)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct sk_buff *skb;
 	u16 len = control & BMU_BBC;
 
 	if (unlikely(netif_msg_rx_status(skge)))
 		printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
-		       skge->netdev->name, e - skge->rx_ring.start,
+		       dev->name, e - skge->rx_ring.start,
 		       status, len);
 
 	if (len > skge->rx_buf_size)
@@ -2610,7 +2598,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 		goto error;
 
 	if (len < RX_COPY_THRESHOLD) {
-		skb = alloc_skb(len + 2, GFP_ATOMIC);
+		skb = netdev_alloc_skb(dev, len + 2);
 		if (!skb)
 			goto resubmit;
 
@@ -2625,7 +2613,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 		skge_rx_reuse(e, skge->rx_buf_size);
 	} else {
 		struct sk_buff *nskb;
-		nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
+		nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
 		if (!nskb)
 			goto resubmit;
 
@@ -2640,20 +2628,19 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
 	}
 
 	skb_put(skb, len);
-	skb->dev = skge->netdev;
 	if (skge->rx_csum) {
 		skb->csum = csum;
 		skb->ip_summed = CHECKSUM_HW;
 	}
 
-	skb->protocol = eth_type_trans(skb, skge->netdev);
+	skb->protocol = eth_type_trans(skb, dev);
 
 	return skb;
 error:
 
 	if (netif_msg_rx_err(skge))
 		printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
-		       skge->netdev->name, e - skge->rx_ring.start,
+		       dev->name, e - skge->rx_ring.start,
 		       control, status);
 
 	if (skge->hw->chip_id == CHIP_ID_GENESIS) {
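
The alloc_skb() -> netdev_alloc_skb() conversions in this function (and the GFP_KERNEL variant __netdev_alloc_skb() in skge_rx_fill() above) also explain the deleted "skb->dev = skge->netdev" line: the netdev allocator associates the skb with its device itself. A sketch of what it does, modulo headroom-padding details:

	static inline struct sk_buff *example_netdev_alloc_skb(struct net_device *dev,
								unsigned int length)
	{
		struct sk_buff *skb = alloc_skb(length, GFP_ATOMIC);

		if (skb)
			skb->dev = dev;		/* so drivers need not set it */
		return skb;
	}
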
@@ -2678,15 +2665,15 @@ resubmit:
 }
 
 /* Free all buffers in Tx ring which are no longer owned by device */
-static void skge_txirq(struct net_device *dev)
+static void skge_tx_done(struct net_device *dev)
 {
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
 
-	rmb();
+	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-	spin_lock(&skge->tx_lock);
+	netif_tx_lock(dev);
 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 
@@ -2697,11 +2684,10 @@ static void skge_txirq(struct net_device *dev)
 	}
 	skge->tx_ring.to_clean = e;
 
-	if (netif_queue_stopped(skge->netdev)
-	    && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
-		netif_wake_queue(skge->netdev);
+	if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
+		netif_wake_queue(dev);
 
-	spin_unlock(&skge->tx_lock);
+	netif_tx_unlock(dev);
 }
 
 static int skge_poll(struct net_device *dev, int *budget)
@@ -2713,6 +2699,10 @@ static int skge_poll(struct net_device *dev, int *budget)
 	int to_do = min(dev->quota, *budget);
 	int work_done = 0;
 
+	skge_tx_done(dev);
+
+	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
 	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
@@ -2723,7 +2713,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 		if (control & BMU_OWN)
 			break;
 
-		skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
+		skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
 		if (likely(skb)) {
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
@@ -2743,12 +2733,11 @@ static int skge_poll(struct net_device *dev, int *budget)
 	if (work_done >= to_do)
 		return 1; /* not done */
 
-	netif_rx_complete(dev);
-
 	spin_lock_irq(&hw->hw_lock);
-	hw->intr_mask |= rxirqmask[skge->port];
+	__netif_rx_complete(dev);
+	hw->intr_mask |= irqmask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	mmiowb();
+	skge_read32(hw, B0_IMSK);
 	spin_unlock_irq(&hw->hw_lock);
 
 	return 0;
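
A pattern repeated through the rest of this patch: every skge_write32(hw, B0_IMSK, ...) is now followed by skge_read32(hw, B0_IMSK). PCI writes are posted, so without the read-back the unmask could still be sitting in a bridge buffer when the spinlock is released; reading any register on the same device forces the write to complete. (The mmiowb() it replaces only ordered writes, it did not flush them.) As a helper it would look like:

	static inline void example_imask_write(struct skge_hw *hw, u32 mask)
	{
		skge_write32(hw, B0_IMSK, mask);
		skge_read32(hw, B0_IMSK);	/* flush the posted write */
	}
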
@@ -2882,6 +2871,7 @@ static void skge_extirq(void *arg)
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	skge_read32(hw, B0_IMSK);
 	spin_unlock_irq(&hw->hw_lock);
 }
 
@@ -2889,27 +2879,23 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct skge_hw *hw = dev_id;
 	u32 status;
+	int handled = 0;
 
+	spin_lock(&hw->hw_lock);
 	/* Reading this register masks IRQ */
 	status = skge_read32(hw, B0_SP_ISRC);
-	if (status == 0)
-		return IRQ_NONE;
+	if (status == 0 || status == ~0)
+		goto out;
 
-	spin_lock(&hw->hw_lock);
+	handled = 1;
 	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
 		schedule_work(&hw->phy_work);
 	}
 
-	if (status & IS_XA1_F) {
-		skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
-		skge_txirq(hw->dev[0]);
-	}
-
-	if (status & IS_R1_F) {
-		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~IS_R1_F;
+	if (status & (IS_XA1_F|IS_R1_F)) {
+		hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
 		netif_rx_schedule(hw->dev[0]);
 	}
 
@@ -2928,14 +2914,8 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 		skge_mac_intr(hw, 0);
 
 	if (hw->dev[1]) {
-		if (status & IS_XA2_F) {
-			skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
-			skge_txirq(hw->dev[1]);
-		}
-
-		if (status & IS_R2_F) {
-			skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-			hw->intr_mask &= ~IS_R2_F;
+		if (status & (IS_XA2_F|IS_R2_F)) {
+			hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
 			netif_rx_schedule(hw->dev[1]);
 		}
 
@@ -2956,9 +2936,11 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 		skge_error_irq(hw);
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	skge_read32(hw, B0_IMSK);
+out:
 	spin_unlock(&hw->hw_lock);
 
-	return IRQ_HANDLED;
+	return IRQ_RETVAL(handled);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
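
Two defensive changes land in skge_intr(): the whole handler now runs under hw_lock, and "status == ~0" is treated as "not ours". On PCI a read from a surprise-removed or wedged device returns all ones, so without that test a dead card on a shared line could be claimed forever. IRQ_RETVAL() then folds the handled flag back into IRQ_HANDLED/IRQ_NONE. The shape of the check in isolation:

	static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct skge_hw *hw = dev_id;
		u32 status = skge_read32(hw, B0_SP_ISRC);

		if (status == 0 || status == ~0)	/* not us, or card is gone */
			return IRQ_NONE;
		/* ... service and acknowledge events ... */
		return IRQ_HANDLED;
	}
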
@@ -3107,7 +3089,6 @@ static int skge_reset(struct skge_hw *hw)
 	else
 		hw->ram_size = t8 * 4096;
 
-	spin_lock_init(&hw->hw_lock);
 	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
 	if (hw->ports > 1)
 		hw->intr_mask |= IS_PORT_2;
@@ -3223,7 +3204,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	dev->poll_controller = skge_netpoll;
 #endif
 	dev->irq = hw->pdev->irq;
-	dev->features = NETIF_F_LLTX;
+
 	if (highmem)
 		dev->features |= NETIF_F_HIGHDMA;
 
@@ -3245,8 +3226,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
 	skge->port = port;
 
-	spin_lock_init(&skge->tx_lock);
-
 	if (hw->chip_id != CHIP_ID_GENESIS) {
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 		skge->rx_csum = 1;
@@ -3333,6 +3312,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	hw->pdev = pdev;
 	mutex_init(&hw->phy_mutex);
 	INIT_WORK(&hw->phy_work, skge_extirq, hw);
+	spin_lock_init(&hw->hw_lock);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
@@ -3341,23 +3321,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 		goto err_out_free_hw;
 	}
 
-	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw);
-	if (err) {
-		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
-		       pci_name(pdev), pdev->irq);
-		goto err_out_iounmap;
-	}
-	pci_set_drvdata(pdev, hw);
-
 	err = skge_reset(hw);
 	if (err)
-		goto err_out_free_irq;
+		goto err_out_iounmap;
 
 	printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
 	       (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
 	       skge_board_name(hw), hw->chip_rev);
 
-	if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
+	dev = skge_devinit(hw, 0, using_dac);
+	if (!dev)
 		goto err_out_led_off;
 
 	if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -3367,7 +3340,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 		goto err_out_free_netdev;
 	}
 
-
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3375,6 +3347,12 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 		goto err_out_free_netdev;
 	}
 
+	err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+	if (err) {
+		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+		       dev->name, pdev->irq);
+		goto err_out_unregister;
+	}
 	skge_show_addr(dev);
 
 	if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
@@ -3387,15 +3365,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 			free_netdev(dev1);
 		}
 	}
+	pci_set_drvdata(pdev, hw);
 
 	return 0;
 
+err_out_unregister:
+	unregister_netdev(dev);
err_out_free_netdev:
 	free_netdev(dev);
err_out_led_off:
 	skge_write16(hw, B0_LED, LED_STAT_OFF);
-err_out_free_irq:
-	free_irq(pdev->irq, hw);
err_out_iounmap:
 	iounmap(hw->regs);
err_out_free_hw:
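
The probe rework moves request_irq() from before skge_reset() to after register_netdev(). The IRQ then shows up in /proc/interrupts under the final netdev name instead of the module name, and a shared-line interrupt can no longer fire into a half-initialized hw before the netdev exists; the new err_out_unregister label unwinds register_netdev() if request_irq() fails. The resulting ordering, heavily condensed into a sketch:

	static int example_attach(struct pci_dev *pdev, struct net_device *dev,
				  struct skge_hw *hw)
	{
		int err;

		err = register_netdev(dev);	/* dev->name is final after this */
		if (err)
			return err;

		err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
		if (err) {
			unregister_netdev(dev);	/* undo on failure */
			return err;
		}
		return 0;
	}
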
@@ -3425,6 +3404,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
 	skge_write32(hw, B0_IMSK, 0);
+	skge_read32(hw, B0_IMSK);
 	spin_unlock_irq(&hw->hw_lock);
 
 	skge_write16(hw, B0_LED, LED_STAT_OFF);
@@ -3450,26 +3430,25 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct skge_hw *hw = pci_get_drvdata(pdev);
 	int i, wol = 0;
 
-	for (i = 0; i < 2; i++) {
+	pci_save_state(pdev);
+	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
 
-		if (dev) {
+		if (netif_running(dev)) {
 			struct skge_port *skge = netdev_priv(dev);
-			if (netif_running(dev)) {
-				netif_carrier_off(dev);
-				if (skge->wol)
-					netif_stop_queue(dev);
-				else
-					skge_down(dev);
-			}
-			netif_device_detach(dev);
+
+			netif_carrier_off(dev);
+			if (skge->wol)
+				netif_stop_queue(dev);
+			else
+				skge_down(dev);
 			wol |= skge->wol;
 		}
+		netif_device_detach(dev);
 	}
 
-	pci_save_state(pdev);
+	skge_write32(hw, B0_IMSK, 0);
 	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
-	pci_disable_device(pdev);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
@@ -3478,23 +3457,33 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 static int skge_resume(struct pci_dev *pdev)
 {
 	struct skge_hw *hw = pci_get_drvdata(pdev);
-	int i;
+	int i, err;
 
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 	pci_enable_wake(pdev, PCI_D0, 0);
 
-	skge_reset(hw);
+	err = skge_reset(hw);
+	if (err)
+		goto out;
 
-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
-		if (dev) {
-			netif_device_attach(dev);
-			if (netif_running(dev) && skge_up(dev))
+
+		netif_device_attach(dev);
+		if (netif_running(dev)) {
+			err = skge_up(dev);
+
+			if (err) {
+				printk(KERN_ERR PFX "%s: could not up: %d\n",
+				       dev->name, err);
 				dev_close(dev);
+				goto out;
+			}
 		}
 	}
-	return 0;
+out:
+	return err;
 }
 #endif
 
@@ -3511,7 +3500,7 @@ static struct pci_driver skge_driver = {
 
 static int __init skge_init_module(void)
 {
-	return pci_module_init(&skge_driver);
+	return pci_register_driver(&skge_driver);
 }
 
 static void __exit skge_cleanup_module(void)
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 593387b3c0dd..79e09271bcf9 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2417,7 +2417,6 @@ struct skge_port {
 	struct net_device *netdev;
 	int port;
 
-	spinlock_t	     tx_lock;
 	struct skge_ring     tx_ring;
 	struct skge_ring     rx_ring;
 
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index de91609ca112..7ce0663baf45 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.5"
+#define DRV_VERSION		"1.7"
 #define PFX			DRV_NAME " "
 
 /*
@@ -106,6 +106,7 @@ static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, 	/* DGE-550SX */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -117,10 +118,17 @@ static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4352) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4353) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4365) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4366) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4367) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) },
 	{ 0 }
 };
 
@@ -190,7 +198,6 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 {
 	u16 power_control;
-	u32 reg1;
 	int vaux;
 
 	pr_debug("sky2_set_power_state %d\n", state);
@@ -223,18 +230,9 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 		else
 			sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 
-		/* Turn off phy power saving */
-		reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
-		reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
-
-		/* looks like this XL is back asswards .. */
-		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
-			reg1 |= PCI_Y2_PHY1_COMA;
-			if (hw->ports > 1)
-				reg1 |= PCI_Y2_PHY2_COMA;
-		}
-
 		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+			u32 reg1;
+
 			sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 			reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
 			reg1 &= P_ASPM_CONTROL_MSK;
@@ -242,22 +240,10 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 			sky2_pci_write32(hw, PCI_DEV_REG5, 0);
 		}
 
-		sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
-		udelay(100);
-
 		break;
 
 	case PCI_D3hot:
 	case PCI_D3cold:
-		/* Turn on phy power saving */
-		reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
-		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
-			reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
-		else
-			reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
-		sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
-		udelay(100);
-
 		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
 			sky2_write8(hw, B2_Y2_CLK_GATE, 0);
 		else
@@ -281,7 +267,7 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 }
 
-static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
+static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port)
 {
 	u16 reg;
 
@@ -529,6 +515,29 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
 }
 
+static void sky2_phy_power(struct sky2_hw *hw, unsigned port, int onoff)
+{
+	u32 reg1;
+	static const u32 phy_power[]
+		= { PCI_Y2_PHY1_POWD, PCI_Y2_PHY2_POWD };
+
+	/* looks like this XL is back asswards .. */
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+		onoff = !onoff;
+
+	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
+
+	if (onoff)
+		/* Turn off phy power saving */
+		reg1 &= ~phy_power[port];
+	else
+		reg1 |= phy_power[port];
+
+	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+	sky2_pci_read32(hw, PCI_DEV_REG1);
+	udelay(100);
+}
+
 /* Force a renegotiation */
 static void sky2_phy_reinit(struct sky2_port *sky2)
 {
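
sky2_phy_power() consolidates the PHY power fiddling deleted from sky2_set_power_state() above into one per-port helper, keeping the XL rev > 1 polarity quirk in a single place (and using the same posted-write read-back flush noted for skge). Its two call sites appear later in this patch; in sketch form:

	static void example_up(struct sky2_hw *hw, unsigned port)
	{
		sky2_phy_power(hw, port, 1);	/* sky2_up(): PHY on before MAC init */
	}

	static void example_down(struct sky2_hw *hw, unsigned port)
	{
		sky2_phy_power(hw, port, 0);	/* sky2_down(): back to power-down */
	}
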
@@ -761,9 +770,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
 /* Update chip's next pointer */
 static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
 {
+	q = Y2_QADDR(q, PREF_UNIT_PUT_IDX);
 	wmb();
-	sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
-	mmiowb();
+	sky2_write16(hw, q, idx);
+	sky2_read16(hw, q);
 }
 
 
@@ -950,14 +960,16 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 /*
  * It appears the hardware has a bug in the FIFO logic that
  * cause it to hang if the FIFO gets overrun and the receive buffer
- * is not aligned. ALso alloc_skb() won't align properly if slab
- * debugging is enabled.
+ * is not 64 byte aligned. The buffer returned from netdev_alloc_skb is
+ * aligned except if slab debugging is enabled.
  */
-static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
+static inline struct sk_buff *sky2_alloc_skb(struct net_device *dev,
+					     unsigned int length,
+					     gfp_t gfp_mask)
 {
 	struct sk_buff *skb;
 
-	skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
+	skb = __netdev_alloc_skb(dev, length + RX_SKB_ALIGN, gfp_mask);
 	if (likely(skb)) {
 		unsigned long p	= (unsigned long) skb->data;
 		skb_reserve(skb, ALIGN(p, RX_SKB_ALIGN) - p);
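
The skb_reserve() arithmetic above rounds skb->data up to the next RX_SKB_ALIGN boundary. A worked example, assuming RX_SKB_ALIGN is 8: if skb->data lands at 0x1005, ALIGN(0x1005, 8) is 0x1008, so skb_reserve(skb, 3) advances the data pointer and the DMA buffer starts aligned. The kernel's ALIGN() macro reduces to:

	#define EXAMPLE_ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))
	/* EXAMPLE_ALIGN(0x1005, 8) == 0x1008
	 *   -> skb_reserve(skb, 0x1008 - 0x1005) */
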
@@ -993,7 +1005,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
 	for (i = 0; i < sky2->rx_pending; i++) {
 		struct ring_info *re = sky2->rx_ring + i;
 
-		re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
+		re->skb = sky2_alloc_skb(sky2->netdev, sky2->rx_bufsize,
+					 GFP_KERNEL);
 		if (!re->skb)
 			goto nomem;
 
@@ -1081,6 +1094,8 @@ static int sky2_up(struct net_device *dev)
 	if (!sky2->rx_ring)
 		goto err_out;
 
+	sky2_phy_power(hw, port, 1);
+
 	sky2_mac_init(hw, port);
 
 	/* Determine available ram buffer space (in 4K blocks).
@@ -1185,7 +1200,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	struct sky2_tx_le *le = NULL;
 	struct tx_ring_info *re;
 	unsigned i, len;
-	int avail;
 	dma_addr_t mapping;
 	u32 addr64;
 	u16 mss;
@@ -1235,25 +1249,18 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 	/* Check for TCP Segmentation Offload */
 	mss = skb_shinfo(skb)->gso_size;
 	if (mss != 0) {
-		/* just drop the packet if non-linear expansion fails */
-		if (skb_header_cloned(skb) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-			dev_kfree_skb(skb);
-			goto out_unlock;
-		}
-
 		mss += ((skb->h.th->doff - 5) * 4);	/* TCP options */
 		mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
 		mss += ETH_HLEN;
-	}
 
 	if (mss != sky2->tx_last_mss) {
 		le = get_tx_le(sky2);
 		le->tx.tso.size = cpu_to_le16(mss);
 		le->tx.tso.rsvd = 0;
 		le->opcode = OP_LRGLEN | HW_OWNER;
 		le->ctrl = 0;
 		sky2->tx_last_mss = mss;
+	}
 	}
 
 	ctrl = 0;
@@ -1281,12 +1288,17 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		if (skb->nh.iph->protocol == IPPROTO_UDP)
 			ctrl |= UDPTCP;
 
-		le = get_tx_le(sky2);
-		le->tx.csum.start = cpu_to_le16(hdr);
-		le->tx.csum.offset = cpu_to_le16(offset);
-		le->length = 0;	/* initial checksum value */
-		le->ctrl = 1;	/* one packet */
-		le->opcode = OP_TCPLISW | HW_OWNER;
+		if (hdr != sky2->tx_csum_start || offset != sky2->tx_csum_offset) {
+			sky2->tx_csum_start = hdr;
+			sky2->tx_csum_offset = offset;
+
+			le = get_tx_le(sky2);
+			le->tx.csum.start = cpu_to_le16(hdr);
+			le->tx.csum.offset = cpu_to_le16(offset);
+			le->length = 0;	/* initial checksum value */
+			le->ctrl = 1;	/* one packet */
+			le->opcode = OP_TCPLISW | HW_OWNER;
+		}
 	}
 
 	le = get_tx_le(sky2);
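
Same optimization as the tx_last_mss logic a few hunks up: the checksum control element is only emitted when the start/offset values actually change between packets, saving one list element per packet on a steady stream such as bulk TCP. The tx_csum_start/tx_csum_offset fields are presumably new state in struct sky2_port, added by this patch in sky2.h outside the quoted hunks. The pattern in general form:

	static void example_emit_csum(struct sky2_port *sky2, u16 hdr, u16 offset)
	{
		if (hdr == sky2->tx_csum_start && offset == sky2->tx_csum_offset)
			return;		/* hardware still holds these values */
		sky2->tx_csum_start = hdr;
		sky2->tx_csum_offset = offset;
		/* ... queue the OP_TCPLISW control element ... */
	}
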
@@ -1321,23 +1333,18 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 		le->opcode = OP_BUFFER | HW_OWNER;
 
 		fre = sky2->tx_ring
 			+ RING_NEXT((re - sky2->tx_ring) + i, TX_RING_SIZE);
 		pci_unmap_addr_set(fre, mapaddr, mapping);
 	}
 
 	re->idx = sky2->tx_prod;
 	le->ctrl |= EOP;
 
-	avail = tx_avail(sky2);
-	if (mss != 0 || avail < TX_MIN_PENDING) {
-		le->ctrl |= FRC_STAT;
-		if (avail <= MAX_SKB_TX_LE)
-			netif_stop_queue(dev);
-	}
+	if (tx_avail(sky2) <= MAX_SKB_TX_LE)
+		netif_stop_queue(dev);
 
 	sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
 
-out_unlock:
 	spin_unlock(&sky2->tx_lock);
 
 	dev->trans_start = jiffies;
@@ -1422,7 +1429,7 @@ static int sky2_down(struct net_device *dev)
 	/* Stop more packets from being queued */
 	netif_stop_queue(dev);
 
-	sky2_phy_reset(hw, port);
+	sky2_gmac_reset(hw, port);
 
 	/* Stop transmitter */
 	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
@@ -1470,6 +1477,8 @@ static int sky2_down(struct net_device *dev)
 	imask &= ~portirq_msk[port];
 	sky2_write32(hw, B0_IMSK, imask);
 
+	sky2_phy_power(hw, port, 0);
+
 	/* turn off LED's */
 	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
 
@@ -1833,15 +1842,16 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1833 * For small packets or errors, just reuse existing skb. 1842 * For small packets or errors, just reuse existing skb.
1834 * For larger packets, get new buffer. 1843 * For larger packets, get new buffer.
1835 */ 1844 */
1836static struct sk_buff *sky2_receive(struct sky2_port *sky2, 1845static struct sk_buff *sky2_receive(struct net_device *dev,
1837 u16 length, u32 status) 1846 u16 length, u32 status)
1838{ 1847{
1848 struct sky2_port *sky2 = netdev_priv(dev);
1839 struct ring_info *re = sky2->rx_ring + sky2->rx_next; 1849 struct ring_info *re = sky2->rx_ring + sky2->rx_next;
1840 struct sk_buff *skb = NULL; 1850 struct sk_buff *skb = NULL;
1841 1851
1842 if (unlikely(netif_msg_rx_status(sky2))) 1852 if (unlikely(netif_msg_rx_status(sky2)))
1843 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n", 1853 printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
1844 sky2->netdev->name, sky2->rx_next, status, length); 1854 dev->name, sky2->rx_next, status, length);
1845 1855
1846 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending; 1856 sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
1847 prefetch(sky2->rx_ring + sky2->rx_next); 1857 prefetch(sky2->rx_ring + sky2->rx_next);
@@ -1852,11 +1862,11 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1852 if (!(status & GMR_FS_RX_OK)) 1862 if (!(status & GMR_FS_RX_OK))
1853 goto resubmit; 1863 goto resubmit;
1854 1864
1855 if (length > sky2->netdev->mtu + ETH_HLEN) 1865 if (length > dev->mtu + ETH_HLEN)
1856 goto oversize; 1866 goto oversize;
1857 1867
1858 if (length < copybreak) { 1868 if (length < copybreak) {
1859 skb = alloc_skb(length + 2, GFP_ATOMIC); 1869 skb = netdev_alloc_skb(dev, length + 2);
1860 if (!skb) 1870 if (!skb)
1861 goto resubmit; 1871 goto resubmit;
1862 1872
@@ -1871,7 +1881,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
1871 } else { 1881 } else {
1872 struct sk_buff *nskb; 1882 struct sk_buff *nskb;
1873 1883
1874 nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC); 1884 nskb = sky2_alloc_skb(dev, sky2->rx_bufsize, GFP_ATOMIC);
1875 if (!nskb) 1885 if (!nskb)
1876 goto resubmit; 1886 goto resubmit;
1877 1887
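A note on the alloc_skb() to netdev_alloc_skb() conversions in the sky2_receive() hunks above (tg3.c below gets the same treatment): netdev_alloc_skb() takes the net_device, records it in skb->dev itself, and can apply per-device headroom policy, so the manual skb->dev assignment at each call site can be dropped. A minimal sketch of the before/after shape, with rx_bufsize standing in for whatever length the driver uses:

    /* Before: allocate, then attach the device by hand. */
    skb = dev_alloc_skb(rx_bufsize);
    if (!skb)
            goto resubmit;
    skb->dev = dev;

    /* After: the allocator sets skb->dev for us. */
    skb = netdev_alloc_skb(dev, rx_bufsize);
    if (!skb)
            goto resubmit;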
@@ -1901,7 +1911,7 @@ error:
1901 1911
1902 if (netif_msg_rx_err(sky2) && net_ratelimit()) 1912 if (netif_msg_rx_err(sky2) && net_ratelimit())
1903 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", 1913 printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
1904 sky2->netdev->name, status, length); 1914 dev->name, status, length);
1905 1915
1906 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE)) 1916 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
1907 sky2->net_stats.rx_length_errors++; 1917 sky2->net_stats.rx_length_errors++;
@@ -1927,12 +1937,6 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
1927 } 1937 }
1928} 1938}
1929 1939
1930/* Is status ring empty or is there more to do? */
1931static inline int sky2_more_work(const struct sky2_hw *hw)
1932{
1933 return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
1934}
1935
1936/* Process status response ring */ 1940/* Process status response ring */
1937static int sky2_status_intr(struct sky2_hw *hw, int to_do) 1941static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1938{ 1942{
@@ -1961,11 +1965,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
1961 1965
1962 switch (le->opcode & ~HW_OWNER) { 1966 switch (le->opcode & ~HW_OWNER) {
1963 case OP_RXSTAT: 1967 case OP_RXSTAT:
1964 skb = sky2_receive(sky2, length, status); 1968 skb = sky2_receive(dev, length, status);
1965 if (!skb) 1969 if (!skb)
1966 break; 1970 break;
1967 1971
1968 skb->dev = dev;
1969 skb->protocol = eth_type_trans(skb, dev); 1972 skb->protocol = eth_type_trans(skb, dev);
1970 dev->last_rx = jiffies; 1973 dev->last_rx = jiffies;
1971 1974
@@ -2023,6 +2026,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2023 } 2026 }
2024 } 2027 }
2025 2028
2029 /* Fully processed status ring so clear irq */
2030 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
2031
2026exit_loop: 2032exit_loop:
2027 if (buf_write[0]) { 2033 if (buf_write[0]) {
2028 sky2 = netdev_priv(hw->dev[0]); 2034 sky2 = netdev_priv(hw->dev[0]);
@@ -2232,19 +2238,16 @@ static int sky2_poll(struct net_device *dev0, int *budget)
2232 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); 2238 sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
2233 2239
2234 work_done = sky2_status_intr(hw, work_limit); 2240 work_done = sky2_status_intr(hw, work_limit);
2235 *budget -= work_done; 2241 if (work_done < work_limit) {
2236 dev0->quota -= work_done; 2242 netif_rx_complete(dev0);
2237 2243
2238 if (status & Y2_IS_STAT_BMU) 2244 sky2_read32(hw, B0_Y2_SP_LISR);
2239 sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); 2245 return 0;
2240 2246 } else {
2241 if (sky2_more_work(hw)) 2247 *budget -= work_done;
2248 dev0->quota -= work_done;
2242 return 1; 2249 return 1;
2243 2250 }
2244 netif_rx_complete(dev0);
2245
2246 sky2_read32(hw, B0_Y2_SP_LISR);
2247 return 0;
2248} 2251}
2249 2252
2250static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) 2253static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
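The restructured sky2_poll() above now follows the canonical ->poll() contract of this kernel generation: if less than the allotted work was done, call netif_rx_complete() to leave the poll list and re-enable the chip interrupt (here by reading B0_Y2_SP_LISR), otherwise charge the work against *budget and dev->quota and return 1 to be polled again. A hedged sketch of that shape, with the device-specific pieces stubbed out:

    static int example_poll(struct net_device *dev, int *budget)
    {
            int work_limit = min(*budget, dev->quota);
            /* process_ring() is a hypothetical stand-in for the rx/tx work */
            int work_done = process_ring(dev, work_limit);

            if (work_done < work_limit) {
                    netif_rx_complete(dev);       /* off the poll list */
                    reenable_irq(dev);            /* hypothetical re-enable */
                    return 0;                     /* all caught up */
            }
            *budget -= work_done;
            dev->quota -= work_done;
            return 1;                             /* keep polling */
    }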
@@ -2410,7 +2413,7 @@ static int sky2_reset(struct sky2_hw *hw)
2410 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK); 2413 sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
2411 2414
2412 for (i = 0; i < hw->ports; i++) 2415 for (i = 0; i < hw->ports; i++)
2413 sky2_phy_reset(hw, i); 2416 sky2_gmac_reset(hw, i);
2414 2417
2415 memset(hw->st_le, 0, STATUS_LE_BYTES); 2418 memset(hw->st_le, 0, STATUS_LE_BYTES);
2416 hw->st_idx = 0; 2419 hw->st_idx = 0;
@@ -3201,6 +3204,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3201 struct pci_dev *pdev = hw->pdev; 3204 struct pci_dev *pdev = hw->pdev;
3202 int err; 3205 int err;
3203 3206
3207 init_waitqueue_head (&hw->msi_wait);
3208
3204 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); 3209 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
3205 3210
3206 err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw); 3211 err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);
@@ -3210,10 +3215,8 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
3210 return err; 3215 return err;
3211 } 3216 }
3212 3217
3213 init_waitqueue_head (&hw->msi_wait);
3214
3215 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); 3218 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
3216 wmb(); 3219 sky2_read8(hw, B0_CTST);
3217 3220
3218 wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10); 3221 wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);
3219 3222
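Two separate fixes meet in this sky2_test_msi() hunk. First, the wait queue is now initialized before request_irq(), so a test interrupt that fires as soon as the handler is registered cannot wake an uninitialized queue. Second, the wmb() is replaced with a read-back of the same register: wmb() only orders CPU stores, while a read on the same bus path is what actually flushes a posted MMIO write out to the device. The resulting sequence, condensed from the hunks above:

    init_waitqueue_head(&hw->msi_wait);          /* ready before any IRQ */

    sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
    err = request_irq(pdev->irq, sky2_test_intr, IRQF_SHARED, DRV_NAME, hw);
    if (err)
            return err;                          /* (error path trimmed) */

    sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);      /* fire the test IRQ */
    sky2_read8(hw, B0_CTST);                     /* flush the posted write */

    wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10);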
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 2db8d19b22d1..fa8af9f503e4 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1748,7 +1748,6 @@ enum {
1748 INIT_SUM= 1<<3, 1748 INIT_SUM= 1<<3,
1749 LOCK_SUM= 1<<4, 1749 LOCK_SUM= 1<<4,
1750 INS_VLAN= 1<<5, 1750 INS_VLAN= 1<<5,
1751 FRC_STAT= 1<<6,
1752 EOP = 1<<7, 1751 EOP = 1<<7,
1753}; 1752};
1754 1753
@@ -1844,6 +1843,8 @@ struct sky2_port {
1844 u32 tx_addr64; 1843 u32 tx_addr64;
1845 u16 tx_pending; 1844 u16 tx_pending;
1846 u16 tx_last_mss; 1845 u16 tx_last_mss;
1846 u16 tx_csum_start;
1847 u16 tx_csum_offset;
1847 1848
1848 struct ring_info *rx_ring ____cacheline_aligned_in_smp; 1849 struct ring_info *rx_ring ____cacheline_aligned_in_smp;
1849 struct sky2_rx_le *rx_le; 1850 struct sky2_rx_le *rx_le;
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index 3a1b7131681c..9a540e2092b9 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -94,27 +94,23 @@ slhc_init(int rslots, int tslots)
94 register struct cstate *ts; 94 register struct cstate *ts;
95 struct slcompress *comp; 95 struct slcompress *comp;
96 96
97 comp = (struct slcompress *)kmalloc(sizeof(struct slcompress), 97 comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
98 GFP_KERNEL);
99 if (! comp) 98 if (! comp)
100 goto out_fail; 99 goto out_fail;
101 memset(comp, 0, sizeof(struct slcompress));
102 100
103 if ( rslots > 0 && rslots < 256 ) { 101 if ( rslots > 0 && rslots < 256 ) {
104 size_t rsize = rslots * sizeof(struct cstate); 102 size_t rsize = rslots * sizeof(struct cstate);
105 comp->rstate = (struct cstate *) kmalloc(rsize, GFP_KERNEL); 103 comp->rstate = kzalloc(rsize, GFP_KERNEL);
106 if (! comp->rstate) 104 if (! comp->rstate)
107 goto out_free; 105 goto out_free;
108 memset(comp->rstate, 0, rsize);
109 comp->rslot_limit = rslots - 1; 106 comp->rslot_limit = rslots - 1;
110 } 107 }
111 108
112 if ( tslots > 0 && tslots < 256 ) { 109 if ( tslots > 0 && tslots < 256 ) {
113 size_t tsize = tslots * sizeof(struct cstate); 110 size_t tsize = tslots * sizeof(struct cstate);
114 comp->tstate = (struct cstate *) kmalloc(tsize, GFP_KERNEL); 111 comp->tstate = kzalloc(tsize, GFP_KERNEL);
115 if (! comp->tstate) 112 if (! comp->tstate)
116 goto out_free2; 113 goto out_free2;
117 memset(comp->tstate, 0, tsize);
118 comp->tslot_limit = tslots - 1; 114 comp->tslot_limit = tslots - 1;
119 } 115 }
120 116
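The slhc_init() changes are the standard kmalloc()-plus-memset() to kzalloc() conversion: kzalloc() hands back zeroed memory, so the memset() goes away, and since it returns void * the casts go too. The generic pattern, with struct foo as a placeholder:

    /* Before */
    p = (struct foo *)kmalloc(sizeof(struct foo), GFP_KERNEL);
    if (!p)
            goto out_fail;
    memset(p, 0, sizeof(struct foo));

    /* After */
    p = kzalloc(sizeof(struct foo), GFP_KERNEL);
    if (!p)
            goto out_fail;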
@@ -141,9 +137,9 @@ slhc_init(int rslots, int tslots)
141 return comp; 137 return comp;
142 138
143out_free2: 139out_free2:
144 kfree((unsigned char *)comp->rstate); 140 kfree(comp->rstate);
145out_free: 141out_free:
146 kfree((unsigned char *)comp); 142 kfree(comp);
147out_fail: 143out_fail:
148 return NULL; 144 return NULL;
149} 145}
@@ -700,20 +696,6 @@ EXPORT_SYMBOL(slhc_compress);
700EXPORT_SYMBOL(slhc_uncompress); 696EXPORT_SYMBOL(slhc_uncompress);
701EXPORT_SYMBOL(slhc_toss); 697EXPORT_SYMBOL(slhc_toss);
702 698
703#ifdef MODULE
704
705int init_module(void)
706{
707 printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California\n");
708 return 0;
709}
710
711void cleanup_module(void)
712{
713 return;
714}
715
716#endif /* MODULE */
717#else /* CONFIG_INET */ 699#else /* CONFIG_INET */
718 700
719 701
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index d37bd860b336..4438fe8c9499 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -55,8 +55,6 @@ static const char version[] =
55 ) 55 )
56#endif 56#endif
57 57
58
59#include <linux/config.h>
60#include <linux/init.h> 58#include <linux/init.h>
61#include <linux/module.h> 59#include <linux/module.h>
62#include <linux/kernel.h> 60#include <linux/kernel.h>
@@ -1092,6 +1090,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs
1092 /* Spurious interrupt check */ 1090 /* Spurious interrupt check */
1093 if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != 1091 if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
1094 (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { 1092 (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
1093 spin_unlock_irqrestore(&lp->lock, flags);
1095 return IRQ_NONE; 1094 return IRQ_NONE;
1096 } 1095 }
1097 1096
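The smc911x fix above plugs a lock leak: on a spurious interrupt the handler returned IRQ_NONE while still holding lp->lock with interrupts disabled, wedging the lock for good. The rule it restores is that every early return from a handler must undo the locking done on entry. Sketched, with is_spurious() as a hypothetical predicate:

    static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
            struct net_device *dev = dev_id;
            struct example_priv *lp = netdev_priv(dev);      /* hypothetical */
            unsigned long flags;

            spin_lock_irqsave(&lp->lock, flags);
            if (is_spurious(lp)) {
                    spin_unlock_irqrestore(&lp->lock, flags); /* the fix */
                    return IRQ_NONE;
            }
            /* ... real interrupt work ... */
            spin_unlock_irqrestore(&lp->lock, flags);
            return IRQ_HANDLED;
    }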
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 3d8dcb6c8758..cf62373b808b 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev)
321 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 321 DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
322 322
323 /* Disable all interrupts, block TX tasklet */ 323 /* Disable all interrupts, block TX tasklet */
324 spin_lock(&lp->lock); 324 spin_lock_irq(&lp->lock);
325 SMC_SELECT_BANK(2); 325 SMC_SELECT_BANK(2);
326 SMC_SET_INT_MASK(0); 326 SMC_SET_INT_MASK(0);
327 pending_skb = lp->pending_tx_skb; 327 pending_skb = lp->pending_tx_skb;
328 lp->pending_tx_skb = NULL; 328 lp->pending_tx_skb = NULL;
329 spin_unlock(&lp->lock); 329 spin_unlock_irq(&lp->lock);
330 330
331 /* free any pending tx skb */ 331 /* free any pending tx skb */
332 if (pending_skb) { 332 if (pending_skb) {
@@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev)
448 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 448 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
449 449
450 /* no more interrupts for me */ 450 /* no more interrupts for me */
451 spin_lock(&lp->lock); 451 spin_lock_irq(&lp->lock);
452 SMC_SELECT_BANK(2); 452 SMC_SELECT_BANK(2);
453 SMC_SET_INT_MASK(0); 453 SMC_SET_INT_MASK(0);
454 pending_skb = lp->pending_tx_skb; 454 pending_skb = lp->pending_tx_skb;
455 lp->pending_tx_skb = NULL; 455 lp->pending_tx_skb = NULL;
456 spin_unlock(&lp->lock); 456 spin_unlock_irq(&lp->lock);
457 if (pending_skb) 457 if (pending_skb)
458 dev_kfree_skb(pending_skb); 458 dev_kfree_skb(pending_skb);
459 459
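The smc_reset()/smc_shutdown() changes swap plain spin_lock() for spin_lock_irq(). The reason: lp->lock is also taken by the interrupt handler, and taking such a lock from process context with IRQs enabled invites a same-CPU deadlock, where the interrupt arrives while the lock is held and then spins on it forever. The convention, sketched:

    /* Process context: must disable local IRQs around a lock the
     * interrupt handler also takes. */
    spin_lock_irq(&lp->lock);
    /* ... manipulate state shared with the ISR ... */
    spin_unlock_irq(&lp->lock);

    /* Interrupt handler: IRQs already disabled here, plain lock suffices. */
    spin_lock(&lp->lock);
    /* ... */
    spin_unlock(&lp->lock);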
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 4ec4b4d23ae5..7aa7fbac8224 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -136,14 +136,9 @@
136#define SMC_CAN_USE_32BIT 0 136#define SMC_CAN_USE_32BIT 0
137#define SMC_IO_SHIFT 0 137#define SMC_IO_SHIFT 0
138#define SMC_NOWAIT 1 138#define SMC_NOWAIT 1
139#define SMC_USE_PXA_DMA 1
140 139
141#define SMC_inb(a, r) readb((a) + (r))
142#define SMC_inw(a, r) readw((a) + (r)) 140#define SMC_inw(a, r) readw((a) + (r))
143#define SMC_inl(a, r) readl((a) + (r))
144#define SMC_outb(v, a, r) writeb(v, (a) + (r))
145#define SMC_outw(v, a, r) writew(v, (a) + (r)) 141#define SMC_outw(v, a, r) writew(v, (a) + (r))
146#define SMC_outl(v, a, r) writel(v, (a) + (r))
147#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 142#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
148#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 143#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
149 144
@@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
189#define SMC_IO_SHIFT 0 184#define SMC_IO_SHIFT 0
190#define SMC_NOWAIT 1 185#define SMC_NOWAIT 1
191 186
192#define SMC_inb(a, r) readb((a) + (r))
193#define SMC_outb(v, a, r) writeb(v, (a) + (r))
194#define SMC_inw(a, r) readw((a) + (r)) 187#define SMC_inw(a, r) readw((a) + (r))
195#define SMC_outw(v, a, r) writew(v, (a) + (r)) 188#define SMC_outw(v, a, r) writew(v, (a) + (r))
196#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 189#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
197#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 190#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
198#define SMC_inl(a, r) readl((a) + (r))
199#define SMC_outl(v, a, r) writel(v, (a) + (r))
200#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
201#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
202 191
203#include <asm/mach-types.h> 192#include <asm/mach-types.h>
204#include <asm/arch/cpu.h> 193#include <asm/arch/cpu.h>
@@ -372,6 +361,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
372 361
373#define SMC_IRQ_FLAGS (0) 362#define SMC_IRQ_FLAGS (0)
374 363
364#elif defined(CONFIG_ARCH_VERSATILE)
365
366#define SMC_CAN_USE_8BIT 1
367#define SMC_CAN_USE_16BIT 1
368#define SMC_CAN_USE_32BIT 1
369#define SMC_NOWAIT 1
370
371#define SMC_inb(a, r) readb((a) + (r))
372#define SMC_inw(a, r) readw((a) + (r))
373#define SMC_inl(a, r) readl((a) + (r))
374#define SMC_outb(v, a, r) writeb(v, (a) + (r))
375#define SMC_outw(v, a, r) writew(v, (a) + (r))
376#define SMC_outl(v, a, r) writel(v, (a) + (r))
377#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
378#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
379
380#define SMC_IRQ_FLAGS (0)
381
375#else 382#else
376 383
377#define SMC_CAN_USE_8BIT 1 384#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 647f62e9707d..88907218457a 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1611,13 +1611,12 @@ spider_net_open(struct net_device *netdev)
1611 int result; 1611 int result;
1612 1612
1613 result = -ENOMEM; 1613 result = -ENOMEM;
1614 if (spider_net_init_chain(card, &card->tx_chain, 1614 if (spider_net_init_chain(card, &card->tx_chain, card->descr,
1615 card->descr, 1615 PCI_DMA_TODEVICE, card->tx_desc))
1616 PCI_DMA_TODEVICE, tx_descriptors))
1617 goto alloc_tx_failed; 1616 goto alloc_tx_failed;
1618 if (spider_net_init_chain(card, &card->rx_chain, 1617 if (spider_net_init_chain(card, &card->rx_chain,
1619 card->descr + tx_descriptors, 1618 card->descr + card->rx_desc,
1620 PCI_DMA_FROMDEVICE, rx_descriptors)) 1619 PCI_DMA_FROMDEVICE, card->rx_desc))
1621 goto alloc_rx_failed; 1620 goto alloc_rx_failed;
1622 1621
1623 /* allocate rx skbs */ 1622 /* allocate rx skbs */
@@ -2005,6 +2004,9 @@ spider_net_setup_netdev(struct spider_net_card *card)
2005 2004
2006 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2005 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2007 2006
2007 card->tx_desc = tx_descriptors;
2008 card->rx_desc = rx_descriptors;
2009
2008 spider_net_setup_netdev_ops(netdev); 2010 spider_net_setup_netdev_ops(netdev);
2009 2011
2010 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; 2012 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index f6dcf180ae3d..30407cdf0892 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -440,6 +440,9 @@ struct spider_net_card {
440 /* for ethtool */ 440 /* for ethtool */
441 int msg_enable; 441 int msg_enable;
442 442
443 int rx_desc;
444 int tx_desc;
445
443 struct spider_net_descr descr[0]; 446 struct spider_net_descr descr[0];
444}; 447};
445 448
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index a5bb0b7633af..02209222b8c9 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
130 return 0; 130 return 0;
131} 131}
132 132
133static void
134spider_net_ethtool_get_ringparam(struct net_device *netdev,
135 struct ethtool_ringparam *ering)
136{
137 struct spider_net_card *card = netdev->priv;
138
139 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
140 ering->tx_pending = card->tx_desc;
141 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
142 ering->rx_pending = card->rx_desc;
143}
144
133struct ethtool_ops spider_net_ethtool_ops = { 145struct ethtool_ops spider_net_ethtool_ops = {
134 .get_settings = spider_net_ethtool_get_settings, 146 .get_settings = spider_net_ethtool_get_settings,
135 .get_drvinfo = spider_net_ethtool_get_drvinfo, 147 .get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = {
141 .set_rx_csum = spider_net_ethtool_set_rx_csum, 153 .set_rx_csum = spider_net_ethtool_set_rx_csum,
142 .get_tx_csum = spider_net_ethtool_get_tx_csum, 154 .get_tx_csum = spider_net_ethtool_get_tx_csum,
143 .set_tx_csum = spider_net_ethtool_set_tx_csum, 155 .set_tx_csum = spider_net_ethtool_set_tx_csum,
156 .get_ringparam = spider_net_ethtool_get_ringparam,
144}; 157};
145 158
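With the descriptor counts now kept in struct spider_net_card, the new get_ringparam hook can report them through the standard ethtool interface; from userspace the values surface via "ethtool -g ethX" (ETHTOOL_GRINGPARAM). The ethtool core zeroes the struct before the call, so fields the driver leaves untouched (rx_mini_*, rx_jumbo_*) read back as 0. Roughly how the core consumes it, as an illustrative sketch rather than driver code:

    struct ethtool_ringparam ering = { .cmd = ETHTOOL_GRINGPARAM };

    if (dev->ethtool_ops && dev->ethtool_ops->get_ringparam)
            dev->ethtool_ops->get_ringparam(dev, &ering);
    /* ering.rx_pending/tx_pending now carry the configured counts,
     * ering.rx_max_pending/tx_max_pending the hardware maximums. */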
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index c0a62b00ffc8..8e1f6206b7d0 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -2053,7 +2053,7 @@ static int __init starfire_init (void)
2053 return -ENODEV; 2053 return -ENODEV;
2054 } 2054 }
2055 2055
2056 return pci_module_init (&starfire_driver); 2056 return pci_register_driver(&starfire_driver);
2057} 2057}
2058 2058
2059 2059
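This starfire hunk is the first of a long series of identical one-liners in this patch (sungem, tc35815, tg3, 3c359, lanstreamer, de2104x, de4x5, dmfe and friends): pci_module_init() was a thin compatibility wrapper around pci_register_driver() and is on its way out, so callers switch to the real function. Same arguments, same return convention, so every conversion looks like:

    static int __init example_init(void)
    {
            /* was: return pci_module_init(&example_pci_driver); */
            return pci_register_driver(&example_pci_driver);
    }

    static void __exit example_exit(void)
    {
            pci_unregister_driver(&example_pci_driver);
    }

    module_init(example_init);
    module_exit(example_exit);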
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 2dcadb169a22..0d76e2214762 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -914,7 +914,7 @@ static void set_multicast_list( struct net_device *dev )
914 914
915 if (dev->flags & IFF_PROMISC) { 915 if (dev->flags & IFF_PROMISC) {
916 /* Log any net taps. */ 916 /* Log any net taps. */
917 DPRINTK( 1, ( "%s: Promiscuous mode enabled.\n", dev->name )); 917 DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name ));
918 REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ 918 REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
919 } else { 919 } else {
920 short multicast_table[4]; 920 short multicast_table[4];
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index ac17377b3e9f..a3a7a3506bd2 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -17,12 +17,14 @@
17 Support and updates available at 17 Support and updates available at
18 http://www.scyld.com/network/sundance.html 18 http://www.scyld.com/network/sundance.html
19 [link no longer provides useful info -jgarzik] 19 [link no longer provides useful info -jgarzik]
20 Archives of the mailing list are still available at
21 http://www.beowulf.org/pipermail/netdrivers/
20 22
21*/ 23*/
22 24
23#define DRV_NAME "sundance" 25#define DRV_NAME "sundance"
24#define DRV_VERSION "1.1" 26#define DRV_VERSION "1.2"
25#define DRV_RELDATE "27-Jun-2006" 27#define DRV_RELDATE "11-Sep-2006"
26 28
27 29
28/* The user-configurable values. 30/* The user-configurable values.
@@ -107,7 +109,7 @@ static char *media[MAX_UNITS];
107#endif 109#endif
108 110
109/* These identify the driver base version and may not be removed. */ 111/* These identify the driver base version and may not be removed. */
110static char version[] __devinitdata = 112static char version[] =
111KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 113KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
112KERN_INFO " http://www.scyld.com/network/sundance.html\n"; 114KERN_INFO " http://www.scyld.com/network/sundance.html\n";
113 115
@@ -646,7 +648,7 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
646 /* Reset the chip to erase previous misconfiguration. */ 648 /* Reset the chip to erase previous misconfiguration. */
647 if (netif_msg_hw(np)) 649 if (netif_msg_hw(np))
648 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl)); 650 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
649 iowrite16(0x00ff, ioaddr + ASICCtrl + 2); 651 sundance_reset(dev, 0x00ff << 16);
650 if (netif_msg_hw(np)) 652 if (netif_msg_hw(np))
651 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl)); 653 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
652 654
@@ -1075,13 +1077,8 @@ reset_tx (struct net_device *dev)
1075 1077
1076 /* Reset tx logic, TxListPtr will be cleaned */ 1078 /* Reset tx logic, TxListPtr will be cleaned */
1077 iowrite16 (TxDisable, ioaddr + MACCtrl1); 1079 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1078 iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset, 1080 sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16);
1079 ioaddr + ASICCtrl + 2); 1081
1080 for (i=50; i > 0; i--) {
1081 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1082 break;
1083 mdelay(1);
1084 }
1085 /* free all tx skbuff */ 1082 /* free all tx skbuff */
1086 for (i = 0; i < TX_RING_SIZE; i++) { 1083 for (i = 0; i < TX_RING_SIZE; i++) {
1087 skb = np->tx_skbuff[i]; 1084 skb = np->tx_skbuff[i];
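Both sundance call sites above now lean on a sundance_reset() helper instead of open-coding the ASICCtrl write and the ResetBusy poll. The helper itself is outside this hunk; judging from the code it replaces, it plausibly looks like the sketch below (a reconstruction, not the verbatim driver function):

    static void sundance_reset(struct net_device *dev, u32 reset_bits)
    {
            struct netdev_private *np = netdev_priv(dev);
            void __iomem *ioaddr = np->base;      /* field name assumed */
            int i;

            /* Callers pass the reset bits pre-shifted into the high half,
             * matching the old 16-bit write at ASICCtrl + 2. */
            iowrite16(reset_bits >> 16, ioaddr + ASICCtrl + 2);

            for (i = 50; i > 0; i--) {            /* bounded busy-wait */
                    if (!(ioread16(ioaddr + ASICCtrl + 2) & ResetBusy))
                            break;
                    mdelay(1);
            }
    }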
@@ -1467,8 +1464,6 @@ static void set_rx_mode(struct net_device *dev)
1467 int i; 1464 int i;
1468 1465
1469 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1466 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1470 /* Unconditionally log net taps. */
1471 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1472 memset(mc_filter, 0xff, sizeof(mc_filter)); 1467 memset(mc_filter, 0xff, sizeof(mc_filter));
1473 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys; 1468 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1474 } else if ((dev->mc_count > multicast_filter_limit) 1469 } else if ((dev->mc_count > multicast_filter_limit)
@@ -1736,7 +1731,7 @@ static int __init sundance_init(void)
1736#ifdef MODULE 1731#ifdef MODULE
1737 printk(version); 1732 printk(version);
1738#endif 1733#endif
1739 return pci_module_init(&sundance_driver); 1734 return pci_register_driver(&sundance_driver);
1740} 1735}
1741 1736
1742static void __exit sundance_exit(void) 1737static void __exit sundance_exit(void)
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b70bbd748978..1a441a8a2add 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -3194,7 +3194,7 @@ static struct pci_driver gem_driver = {
3194 3194
3195static int __init gem_init(void) 3195static int __init gem_init(void)
3196{ 3196{
3197 return pci_module_init(&gem_driver); 3197 return pci_register_driver(&gem_driver);
3198} 3198}
3199 3199
3200static void __exit gem_cleanup(void) 3200static void __exit gem_cleanup(void)
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 1ef9fd39a79a..ec0413609f36 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
1537{ 1537{
1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) || 1538 if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) { 1539 (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
1540 memset(&sun4_sdev, 0, sizeof(sdev)); 1540 memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr; 1541 sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
1542 sun4_sdev.irqs[0] = 6; 1542 sun4_sdev.irqs[0] = 6;
1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL); 1543 return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)
1547 1547
1548static int __exit sunlance_sun4_remove(void) 1548static int __exit sunlance_sun4_remove(void)
1549{ 1549{
1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev); 1550 struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
1551 struct net_device *net_dev = lp->dev; 1551 struct net_device *net_dev = lp->dev;
1552 1552
1553 unregister_netdevice(net_dev); 1553 unregister_netdevice(net_dev);
1554 1554
1555 lance_free_hwresources(root_lance_dev); 1555 lance_free_hwresources(lp);
1556 1556
1557 free_netdev(net_dev); 1557 free_netdev(net_dev);
1558 1558
1559 dev_set_drvdata(&sun4_sdev->dev, NULL); 1559 dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);
1560 1560
1561 return 0; 1561 return 0;
1562} 1562}
@@ -1566,20 +1566,21 @@ static int __exit sunlance_sun4_remove(void)
1566static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match) 1566static int __devinit sunlance_sbus_probe(struct of_device *dev, const struct of_device_id *match)
1567{ 1567{
1568 struct sbus_dev *sdev = to_sbus_device(&dev->dev); 1568 struct sbus_dev *sdev = to_sbus_device(&dev->dev);
1569 struct device_node *dp = dev->node;
1570 int err; 1569 int err;
1571 1570
1572 if (!strcmp(dp->name, "le")) { 1571 if (sdev->parent) {
1573 err = sparc_lance_probe_one(sdev, NULL, NULL); 1572 struct of_device *parent = &sdev->parent->ofdev;
1574 } else if (!strcmp(dp->name, "ledma")) {
1575 struct sbus_dma *ledma = find_ledma(sdev);
1576 1573
1577 err = sparc_lance_probe_one(sdev->child, ledma, NULL); 1574 if (!strcmp(parent->node->name, "ledma")) {
1578 } else { 1575 struct sbus_dma *ledma = find_ledma(to_sbus_device(&parent->dev));
1579 BUG_ON(strcmp(dp->name, "lebuffer"));
1580 1576
1581 err = sparc_lance_probe_one(sdev->child, NULL, sdev); 1577 err = sparc_lance_probe_one(sdev, ledma, NULL);
1582 } 1578 } else if (!strcmp(parent->node->name, "lebuffer")) {
1579 err = sparc_lance_probe_one(sdev, NULL, to_sbus_device(&parent->dev));
1580 } else
1581 err = sparc_lance_probe_one(sdev, NULL, NULL);
1582 } else
1583 err = sparc_lance_probe_one(sdev, NULL, NULL);
1583 1584
1584 return err; 1585 return err;
1585} 1586}
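The reworked sunlance probe no longer needs separate OF match entries for "ledma" and "lebuffer" (they are deleted in the next hunk): every "le" node is matched directly, and the probe inspects the parent node's name to decide whether the LANCE sits behind a DMA engine, behind a static buffer, or stands alone. Since the side-by-side rendering above is hard to follow, the decision tree condensed, with error handling trimmed:

    if (sdev->parent) {
            struct of_device *parent = &sdev->parent->ofdev;

            if (!strcmp(parent->node->name, "ledma"))
                    /* LANCE fed by a DMA engine */
                    err = sparc_lance_probe_one(sdev,
                                    find_ledma(to_sbus_device(&parent->dev)),
                                    NULL);
            else if (!strcmp(parent->node->name, "lebuffer"))
                    /* LANCE fronted by a dedicated buffer */
                    err = sparc_lance_probe_one(sdev, NULL,
                                    to_sbus_device(&parent->dev));
            else    /* plain on-board LANCE */
                    err = sparc_lance_probe_one(sdev, NULL, NULL);
    } else
            err = sparc_lance_probe_one(sdev, NULL, NULL);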
@@ -1604,12 +1605,6 @@ static struct of_device_id sunlance_sbus_match[] = {
1604 { 1605 {
1605 .name = "le", 1606 .name = "le",
1606 }, 1607 },
1607 {
1608 .name = "ledma",
1609 },
1610 {
1611 .name = "lebuffer",
1612 },
1613 {}, 1608 {},
1614}; 1609};
1615 1610
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 8b53ded66d37..39460fa916fe 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1725,7 +1725,7 @@ static struct pci_driver tc35815_driver = {
1725 1725
1726static int __init tc35815_init_module(void) 1726static int __init tc35815_init_module(void)
1727{ 1727{
1728 return pci_module_init(&tc35815_driver); 1728 return pci_register_driver(&tc35815_driver);
1729} 1729}
1730 1730
1731static void __exit tc35815_cleanup_module(void) 1731static void __exit tc35815_cleanup_module(void)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 1b8138f641e3..d6e2a6869f28 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.63" 71#define DRV_MODULE_VERSION "3.65"
72#define DRV_MODULE_RELDATE "July 25, 2006" 72#define DRV_MODULE_RELDATE "August 07, 2006"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -123,9 +123,6 @@
123 TG3_RX_RCB_RING_SIZE(tp)) 123 TG3_RX_RCB_RING_SIZE(tp))
124#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \ 124#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
125 TG3_TX_RING_SIZE) 125 TG3_TX_RING_SIZE)
126#define TX_BUFFS_AVAIL(TP) \
127 ((TP)->tx_pending - \
128 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
129#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 126#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
130 127
131#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64) 128#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
@@ -2987,6 +2984,13 @@ static void tg3_tx_recover(struct tg3 *tp)
2987 spin_unlock(&tp->lock); 2984 spin_unlock(&tp->lock);
2988} 2985}
2989 2986
2987static inline u32 tg3_tx_avail(struct tg3 *tp)
2988{
2989 smp_mb();
2990 return (tp->tx_pending -
2991 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2992}
2993
2990/* Tigon3 never reports partial packet sends. So we do not 2994/* Tigon3 never reports partial packet sends. So we do not
2991 * need special logic to handle SKBs that have not had all 2995 * need special logic to handle SKBs that have not had all
2992 * of their frags sent yet, like SunGEM does. 2996 * of their frags sent yet, like SunGEM does.
@@ -3038,12 +3042,20 @@ static void tg3_tx(struct tg3 *tp)
3038 3042
3039 tp->tx_cons = sw_idx; 3043 tp->tx_cons = sw_idx;
3040 3044
3041 if (unlikely(netif_queue_stopped(tp->dev))) { 3045 /* Need to make the tx_cons update visible to tg3_start_xmit()
3042 spin_lock(&tp->tx_lock); 3046 * before checking for netif_queue_stopped(). Without the
3047 * memory barrier, there is a small possibility that tg3_start_xmit()
3048 * will miss it and cause the queue to be stopped forever.
3049 */
3050 smp_mb();
3051
3052 if (unlikely(netif_queue_stopped(tp->dev) &&
3053 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
3054 netif_tx_lock(tp->dev);
3043 if (netif_queue_stopped(tp->dev) && 3055 if (netif_queue_stopped(tp->dev) &&
3044 (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH)) 3056 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
3045 netif_wake_queue(tp->dev); 3057 netif_wake_queue(tp->dev);
3046 spin_unlock(&tp->tx_lock); 3058 netif_tx_unlock(tp->dev);
3047 } 3059 }
3048} 3060}
3049 3061
@@ -3097,11 +3109,10 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3097 * Callers depend upon this behavior and assume that 3109 * Callers depend upon this behavior and assume that
3098 * we leave everything unchanged if we fail. 3110 * we leave everything unchanged if we fail.
3099 */ 3111 */
3100 skb = dev_alloc_skb(skb_size); 3112 skb = netdev_alloc_skb(tp->dev, skb_size);
3101 if (skb == NULL) 3113 if (skb == NULL)
3102 return -ENOMEM; 3114 return -ENOMEM;
3103 3115
3104 skb->dev = tp->dev;
3105 skb_reserve(skb, tp->rx_offset); 3116 skb_reserve(skb, tp->rx_offset);
3106 3117
3107 mapping = pci_map_single(tp->pdev, skb->data, 3118 mapping = pci_map_single(tp->pdev, skb->data,
@@ -3270,11 +3281,10 @@ static int tg3_rx(struct tg3 *tp, int budget)
3270 tg3_recycle_rx(tp, opaque_key, 3281 tg3_recycle_rx(tp, opaque_key,
3271 desc_idx, *post_ptr); 3282 desc_idx, *post_ptr);
3272 3283
3273 copy_skb = dev_alloc_skb(len + 2); 3284 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3274 if (copy_skb == NULL) 3285 if (copy_skb == NULL)
3275 goto drop_it_no_recycle; 3286 goto drop_it_no_recycle;
3276 3287
3277 copy_skb->dev = tp->dev;
3278 skb_reserve(copy_skb, 2); 3288 skb_reserve(copy_skb, 2);
3279 skb_put(copy_skb, len); 3289 skb_put(copy_skb, len);
3280 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 3290 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
@@ -3797,7 +3807,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3797 * interrupt. Furthermore, IRQ processing runs lockless so we have 3807 * interrupt. Furthermore, IRQ processing runs lockless so we have
3798 * no IRQ context deadlocks to worry about either. Rejoice! 3808 * no IRQ context deadlocks to worry about either. Rejoice!
3799 */ 3809 */
3800 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { 3810 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3801 if (!netif_queue_stopped(dev)) { 3811 if (!netif_queue_stopped(dev)) {
3802 netif_stop_queue(dev); 3812 netif_stop_queue(dev);
3803 3813
@@ -3893,12 +3903,10 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3893 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); 3903 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3894 3904
3895 tp->tx_prod = entry; 3905 tp->tx_prod = entry;
3896 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { 3906 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3897 spin_lock(&tp->tx_lock);
3898 netif_stop_queue(dev); 3907 netif_stop_queue(dev);
3899 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) 3908 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
3900 netif_wake_queue(tp->dev); 3909 netif_wake_queue(tp->dev);
3901 spin_unlock(&tp->tx_lock);
3902 } 3910 }
3903 3911
3904out_unlock: 3912out_unlock:
@@ -3920,7 +3928,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3920 struct sk_buff *segs, *nskb; 3928 struct sk_buff *segs, *nskb;
3921 3929
3922 /* Estimate the number of fragments in the worst case */ 3930 /* Estimate the number of fragments in the worst case */
3923 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->gso_segs * 3))) { 3931 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3924 netif_stop_queue(tp->dev); 3932 netif_stop_queue(tp->dev);
3925 return NETDEV_TX_BUSY; 3933 return NETDEV_TX_BUSY;
3926 } 3934 }
@@ -3960,7 +3968,7 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3960 * interrupt. Furthermore, IRQ processing runs lockless so we have 3968 * interrupt. Furthermore, IRQ processing runs lockless so we have
3961 * no IRQ context deadlocks to worry about either. Rejoice! 3969 * no IRQ context deadlocks to worry about either. Rejoice!
3962 */ 3970 */
3963 if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { 3971 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3964 if (!netif_queue_stopped(dev)) { 3972 if (!netif_queue_stopped(dev)) {
3965 netif_stop_queue(dev); 3973 netif_stop_queue(dev);
3966 3974
@@ -4110,12 +4118,10 @@ static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4110 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); 4118 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4111 4119
4112 tp->tx_prod = entry; 4120 tp->tx_prod = entry;
4113 if (unlikely(TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))) { 4121 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4114 spin_lock(&tp->tx_lock);
4115 netif_stop_queue(dev); 4122 netif_stop_queue(dev);
4116 if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) 4123 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
4117 netif_wake_queue(tp->dev); 4124 netif_wake_queue(tp->dev);
4118 spin_unlock(&tp->tx_lock);
4119 } 4125 }
4120 4126
4121out_unlock: 4127out_unlock:
@@ -8618,7 +8624,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8618 err = -EIO; 8624 err = -EIO;
8619 8625
8620 tx_len = 1514; 8626 tx_len = 1514;
8621 skb = dev_alloc_skb(tx_len); 8627 skb = netdev_alloc_skb(tp->dev, tx_len);
8622 if (!skb) 8628 if (!skb)
8623 return -ENOMEM; 8629 return -ENOMEM;
8624 8630
@@ -11474,7 +11480,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
11474 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA; 11480 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11475#endif 11481#endif
11476 spin_lock_init(&tp->lock); 11482 spin_lock_init(&tp->lock);
11477 spin_lock_init(&tp->tx_lock);
11478 spin_lock_init(&tp->indirect_lock); 11483 spin_lock_init(&tp->indirect_lock);
11479 INIT_WORK(&tp->reset_task, tg3_reset_task, tp); 11484 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11480 11485
@@ -11814,7 +11819,7 @@ static struct pci_driver tg3_driver = {
11814 11819
11815static int __init tg3_init(void) 11820static int __init tg3_init(void)
11816{ 11821{
11817 return pci_module_init(&tg3_driver); 11822 return pci_register_driver(&tg3_driver);
11818} 11823}
11819 11824
11820static void __exit tg3_cleanup(void) 11825static void __exit tg3_cleanup(void)
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index ba2c98711c88..3ecf356cfb08 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2079,9 +2079,9 @@ struct tg3 {
2079 * lock: Held during reset, PHY access, timer, and when 2079 * lock: Held during reset, PHY access, timer, and when
2080 * updating tg3_flags and tg3_flags2. 2080 * updating tg3_flags and tg3_flags2.
2081 * 2081 *
2082 * tx_lock: Held during tg3_start_xmit and tg3_tx only 2082 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
2083 * when calling netif_[start|stop]_queue. 2083 * netif_tx_lock when it needs to call
2084 * tg3_start_xmit is protected by netif_tx_lock. 2084 * netif_wake_queue.
2085 * 2085 *
2086 * Both of these locks are to be held with BH safety. 2086 * Both of these locks are to be held with BH safety.
2087 * 2087 *
@@ -2118,8 +2118,6 @@ struct tg3 {
2118 u32 tx_cons; 2118 u32 tx_cons;
2119 u32 tx_pending; 2119 u32 tx_pending;
2120 2120
2121 spinlock_t tx_lock;
2122
2123 struct tg3_tx_buffer_desc *tx_ring; 2121 struct tg3_tx_buffer_desc *tx_ring;
2124 struct tx_ring_info *tx_buffers; 2122 struct tx_ring_info *tx_buffers;
2125 dma_addr_t tx_desc_mapping; 2123 dma_addr_t tx_desc_mapping;
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 465921e3874c..412390ba142e 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1815,7 +1815,7 @@ static struct pci_driver xl_3c359_driver = {
1815 1815
1816static int __init xl_pci_init (void) 1816static int __init xl_pci_init (void)
1817{ 1817{
1818 return pci_module_init (&xl_3c359_driver); 1818 return pci_register_driver(&xl_3c359_driver);
1819} 1819}
1820 1820
1821 1821
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 9f491563944e..4470025ff7f8 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
140 140
141/* version and credits */ 141/* version and credits */
142#ifndef PCMCIA 142#ifndef PCMCIA
143static char version[] __initdata = 143static char version[] __devinitdata =
144 "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" 144 "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
145 " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n" 145 " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
146 " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n" 146 " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
@@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
216static int __devinitdata turbo_searched = 0; 216static int __devinitdata turbo_searched = 0;
217 217
218#ifndef PCMCIA 218#ifndef PCMCIA
219static __u32 ibmtr_mem_base __initdata = 0xd0000; 219static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
220#endif 220#endif
221 221
222static void __devinit PrtChanID(char *pcid, short stride) 222static void __devinit PrtChanID(char *pcid, short stride)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 28d968ffd5d0..0d66700c6ced 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1998,7 +1998,7 @@ static struct pci_driver streamer_pci_driver = {
1998}; 1998};
1999 1999
2000static int __init streamer_init_module(void) { 2000static int __init streamer_init_module(void) {
2001 return pci_module_init(&streamer_pci_driver); 2001 return pci_register_driver(&streamer_pci_driver);
2002} 2002}
2003 2003
2004static void __exit streamer_cleanup_module(void) { 2004static void __exit streamer_cleanup_module(void) {
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index cd2e0251e2bc..85a7f797d343 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0);
5666module_param_array(irq, int, NULL, 0); 5666module_param_array(irq, int, NULL, 0);
5667module_param(ringspeed, int, 0); 5667module_param(ringspeed, int, 0);
5668 5668
5669static struct net_device *setup_card(int n) 5669static struct net_device * __init setup_card(int n)
5670{ 5670{
5671 struct net_device *dev = alloc_trdev(sizeof(struct net_local)); 5671 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
5672 int err; 5672 int err;
@@ -5696,9 +5696,8 @@ out:
5696 free_netdev(dev); 5696 free_netdev(dev);
5697 return ERR_PTR(err); 5697 return ERR_PTR(err);
5698} 5698}
5699
5700 5699
5701int init_module(void) 5700int __init init_module(void)
5702{ 5701{
5703 int i, found = 0; 5702 int i, found = 0;
5704 struct net_device *dev; 5703 struct net_device *dev;
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 683f14b01c06..fa3a2bb105ad 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -1,7 +1,7 @@
1/* 1/*
2 drivers/net/tulip/21142.c 2 drivers/net/tulip/21142.c
3 3
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 7
@@ -26,9 +26,9 @@ static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
26 26
27/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list 27/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
28 of available transceivers. */ 28 of available transceivers. */
29void t21142_timer(unsigned long data) 29void t21142_media_task(void *data)
30{ 30{
31 struct net_device *dev = (struct net_device *)data; 31 struct net_device *dev = data;
32 struct tulip_private *tp = netdev_priv(dev); 32 struct tulip_private *tp = netdev_priv(dev);
33 void __iomem *ioaddr = tp->base_addr; 33 void __iomem *ioaddr = tp->base_addr;
34 int csr12 = ioread32(ioaddr + CSR12); 34 int csr12 = ioread32(ioaddr + CSR12);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d05c5aa254ee..17a2ebaef58c 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2138,17 +2138,21 @@ static int de_resume (struct pci_dev *pdev)
2138{ 2138{
2139 struct net_device *dev = pci_get_drvdata (pdev); 2139 struct net_device *dev = pci_get_drvdata (pdev);
2140 struct de_private *de = dev->priv; 2140 struct de_private *de = dev->priv;
2141 int retval = 0;
2141 2142
2142 rtnl_lock(); 2143 rtnl_lock();
2143 if (netif_device_present(dev)) 2144 if (netif_device_present(dev))
2144 goto out; 2145 goto out;
2145 if (netif_running(dev)) { 2146 if (!netif_running(dev))
2146 pci_enable_device(pdev); 2147 goto out_attach;
2147 de_init_hw(de); 2148 if ((retval = pci_enable_device(pdev))) {
2148 netif_device_attach(dev); 2149 printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
2149 } else { 2150 dev->name);
2150 netif_device_attach(dev); 2151 goto out;
2151 } 2152 }
2153 de_init_hw(de);
2154out_attach:
2155 netif_device_attach(dev);
2152out: 2156out:
2153 rtnl_unlock(); 2157 rtnl_unlock();
2154 return 0; 2158 return 0;
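The de_resume() rework adds the error check that pci_enable_device() always deserved and straightens the control flow into forward gotos with a single netif_device_attach() point. As a general resume-path shape, with reinit_hw() as a hypothetical stand-in for de_init_hw():

    static int example_resume(struct pci_dev *pdev)
    {
            struct net_device *dev = pci_get_drvdata(pdev);
            int retval = 0;

            rtnl_lock();
            if (netif_device_present(dev))   /* never detached: nothing to do */
                    goto out;
            if (!netif_running(dev))         /* interface closed: just attach */
                    goto out_attach;
            retval = pci_enable_device(pdev);
            if (retval) {
                    printk(KERN_ERR "%s: pci_enable_device failed in resume\n",
                           dev->name);
                    goto out;                /* leave the device detached */
            }
            reinit_hw(dev);
    out_attach:
            netif_device_attach(dev);
    out:
            rtnl_unlock();
            return 0;  /* the hunk above also still returns 0 unconditionally */
    }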
@@ -2172,7 +2176,7 @@ static int __init de_init (void)
2172#ifdef MODULE 2176#ifdef MODULE
2173 printk("%s", version); 2177 printk("%s", version);
2174#endif 2178#endif
2175 return pci_module_init (&de_driver); 2179 return pci_register_driver(&de_driver);
2176} 2180}
2177 2181
2178static void __exit de_exit (void) 2182static void __exit de_exit (void)
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 75ff14a55239..e661d0a9cc64 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -5754,7 +5754,7 @@ static int __init de4x5_module_init (void)
5754 int err = 0; 5754 int err = 0;
5755 5755
5756#ifdef CONFIG_PCI 5756#ifdef CONFIG_PCI
5757 err = pci_module_init (&de4x5_pci_driver); 5757 err = pci_register_driver(&de4x5_pci_driver);
5758#endif 5758#endif
5759#ifdef CONFIG_EISA 5759#ifdef CONFIG_EISA
5760 err |= eisa_driver_register (&de4x5_eisa_driver); 5760 err |= eisa_driver_register (&de4x5_eisa_driver);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 4e5b0f2acc39..66dade556821 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -2039,7 +2039,7 @@ static int __init dmfe_init_module(void)
2039 if (HPNA_NoiseFloor > 15) 2039 if (HPNA_NoiseFloor > 15)
2040 HPNA_NoiseFloor = 0; 2040 HPNA_NoiseFloor = 0;
2041 2041
2042 rc = pci_module_init(&dmfe_driver); 2042 rc = pci_register_driver(&dmfe_driver);
2043 if (rc < 0) 2043 if (rc < 0)
2044 return rc; 2044 return rc;
2045 2045
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 5ffbd5b300c0..206918bad539 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -1,7 +1,7 @@
1/* 1/*
2 drivers/net/tulip/eeprom.c 2 drivers/net/tulip/eeprom.c
3 3
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 7
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 99ccf2ebb342..7f8f5d42a761 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 drivers/net/tulip/interrupt.c 2 drivers/net/tulip/interrupt.c
3 3
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 7
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index e9bc2a958c14..20bd52b86993 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -1,7 +1,7 @@
1/* 1/*
2 drivers/net/tulip/media.c 2 drivers/net/tulip/media.c
3 3
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 7
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index ca7e53246adb..85a521e0d052 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -1,7 +1,7 @@
1/* 1/*
2 drivers/net/tulip/pnic.c 2 drivers/net/tulip/pnic.c
3 3
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 7
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index ab985023fcca..c31be0e377a8 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -1,7 +1,7 @@
1/* 1/*
2 drivers/net/tulip/pnic2.c 2 drivers/net/tulip/pnic2.c
3 3
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 Modified to help support PNIC_II by Kevin B. Hendricks 7 Modified to help support PNIC_II by Kevin B. Hendricks
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index e058a9fbfe88..066e5d6bcbd8 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -1,7 +1,7 @@
1/* 1/*
2 drivers/net/tulip/timer.c 2 drivers/net/tulip/timer.c
3 3
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 7
@@ -18,13 +18,14 @@
18#include "tulip.h" 18#include "tulip.h"
19 19
20 20
21void tulip_timer(unsigned long data) 21void tulip_media_task(void *data)
22{ 22{
23 struct net_device *dev = (struct net_device *)data; 23 struct net_device *dev = data;
24 struct tulip_private *tp = netdev_priv(dev); 24 struct tulip_private *tp = netdev_priv(dev);
25 void __iomem *ioaddr = tp->base_addr; 25 void __iomem *ioaddr = tp->base_addr;
26 u32 csr12 = ioread32(ioaddr + CSR12); 26 u32 csr12 = ioread32(ioaddr + CSR12);
27 int next_tick = 2*HZ; 27 int next_tick = 2*HZ;
28 unsigned long flags;
28 29
29 if (tulip_debug > 2) { 30 if (tulip_debug > 2) {
30 printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode" 31 printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
@@ -126,6 +127,15 @@ void tulip_timer(unsigned long data)
126 } 127 }
127 break; 128 break;
128 } 129 }
130
131
132 spin_lock_irqsave(&tp->lock, flags);
133 if (tp->timeout_recovery) {
134 tulip_tx_timeout_complete(tp, ioaddr);
135 tp->timeout_recovery = 0;
136 }
137 spin_unlock_irqrestore(&tp->lock, flags);
138
129 /* mod_timer synchronizes us with potential add_timer calls 139 /* mod_timer synchronizes us with potential add_timer calls
130 * from interrupts. 140 * from interrupts.
131 */ 141 */
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 3bcfbf3d23ed..25668ddb1f7e 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -30,11 +30,10 @@
30/* undefine, or define to various debugging levels (>4 == obscene levels) */ 30/* undefine, or define to various debugging levels (>4 == obscene levels) */
31#define TULIP_DEBUG 1 31#define TULIP_DEBUG 1
32 32
33/* undefine USE_IO_OPS for MMIO, define for PIO */
34#ifdef CONFIG_TULIP_MMIO 33#ifdef CONFIG_TULIP_MMIO
35# undef USE_IO_OPS 34#define TULIP_BAR 1 /* CBMA */
36#else 35#else
37# define USE_IO_OPS 1 36#define TULIP_BAR 0 /* CBIO */
38#endif 37#endif
39 38
40 39
@@ -44,7 +43,8 @@ struct tulip_chip_table {
44 int io_size; 43 int io_size;
45 int valid_intrs; /* CSR7 interrupt enable settings */ 44 int valid_intrs; /* CSR7 interrupt enable settings */
46 int flags; 45 int flags;
47 void (*media_timer) (unsigned long data); 46 void (*media_timer) (unsigned long);
47 void (*media_task) (void *);
48}; 48};
49 49
50 50
@@ -142,6 +142,7 @@ enum status_bits {
142 RxNoBuf = 0x80, 142 RxNoBuf = 0x80,
143 RxIntr = 0x40, 143 RxIntr = 0x40,
144 TxFIFOUnderflow = 0x20, 144 TxFIFOUnderflow = 0x20,
145 RxErrIntr = 0x10,
145 TxJabber = 0x08, 146 TxJabber = 0x08,
146 TxNoBuf = 0x04, 147 TxNoBuf = 0x04,
147 TxDied = 0x02, 148 TxDied = 0x02,
@@ -192,9 +193,14 @@ struct tulip_tx_desc {
192 193
193 194
194enum desc_status_bits { 195enum desc_status_bits {
195 DescOwned = 0x80000000, 196 DescOwned = 0x80000000,
196 RxDescFatalErr = 0x8000, 197 DescWholePkt = 0x60000000,
197 RxWholePkt = 0x0300, 198 DescEndPkt = 0x40000000,
199 DescStartPkt = 0x20000000,
200 DescEndRing = 0x02000000,
201 DescUseLink = 0x01000000,
202 RxDescFatalErr = 0x008000,
203 RxWholePkt = 0x00000300,
198}; 204};
199 205
200 206
@@ -366,6 +372,7 @@ struct tulip_private {
366 unsigned int medialock:1; /* Don't sense media type. */ 372 unsigned int medialock:1; /* Don't sense media type. */
367 unsigned int mediasense:1; /* Media sensing in progress. */ 373 unsigned int mediasense:1; /* Media sensing in progress. */
368 unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */ 374 unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
375 unsigned int timeout_recovery:1;
369 unsigned int csr0; /* CSR0 setting. */ 376 unsigned int csr0; /* CSR0 setting. */
370 unsigned int csr6; /* Current CSR6 control settings. */ 377 unsigned int csr6; /* Current CSR6 control settings. */
371 unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */ 378 unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
@@ -384,6 +391,7 @@ struct tulip_private {
384 void __iomem *base_addr; 391 void __iomem *base_addr;
385 int csr12_shadow; 392 int csr12_shadow;
386 int pad0; /* Used for 8-byte alignment */ 393 int pad0; /* Used for 8-byte alignment */
394 struct work_struct media_work;
387}; 395};
388 396
389 397
@@ -398,7 +406,7 @@ struct eeprom_fixup {
398 406
399/* 21142.c */ 407/* 21142.c */
400extern u16 t21142_csr14[]; 408extern u16 t21142_csr14[];
401void t21142_timer(unsigned long data); 409void t21142_media_task(void *data);
402void t21142_start_nway(struct net_device *dev); 410void t21142_start_nway(struct net_device *dev);
403void t21142_lnk_change(struct net_device *dev, int csr5); 411void t21142_lnk_change(struct net_device *dev, int csr5);
404 412
@@ -436,7 +444,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5);
436void pnic_timer(unsigned long data); 444void pnic_timer(unsigned long data);
437 445
438/* timer.c */ 446/* timer.c */
439void tulip_timer(unsigned long data); 447void tulip_media_task(void *data);
440void mxic_timer(unsigned long data); 448void mxic_timer(unsigned long data);
441void comet_timer(unsigned long data); 449void comet_timer(unsigned long data);
442 450
@@ -485,4 +493,14 @@ static inline void tulip_restart_rxtx(struct tulip_private *tp)
485 tulip_start_rxtx(tp); 493 tulip_start_rxtx(tp);
486} 494}
487 495
496static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __iomem *ioaddr)
497{
498 /* Stop and restart the chip's Tx processes. */
499 tulip_restart_rxtx(tp);
500 /* Trigger an immediate transmit demand. */
501 iowrite32(0, ioaddr + CSR1);
502
503 tp->stats.tx_errors++;
504}
505
488#endif /* __NET_TULIP_H__ */ 506#endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 7351831f57ce..2034baf5a2bb 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1,7 +1,7 @@
1/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */ 1/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
2 2
3/* 3/*
4 Maintained by Jeff Garzik <jgarzik@pobox.com> 4 Maintained by Valerie Henson <val_henson@linux.intel.com>
5 Copyright 2000,2001 The Linux Kernel Team 5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker. 6 Written/copyright 1994-2001 by Donald Becker.
7 7
@@ -17,9 +17,9 @@
17 17
18#define DRV_NAME "tulip" 18#define DRV_NAME "tulip"
19#ifdef CONFIG_TULIP_NAPI 19#ifdef CONFIG_TULIP_NAPI
20#define DRV_VERSION "1.1.13-NAPI" /* Keep at least for test */ 20#define DRV_VERSION "1.1.14-NAPI" /* Keep at least for test */
21#else 21#else
22#define DRV_VERSION "1.1.13" 22#define DRV_VERSION "1.1.14"
23#endif 23#endif
24#define DRV_RELDATE "May 11, 2002" 24#define DRV_RELDATE "May 11, 2002"
25 25
@@ -130,7 +130,14 @@ int tulip_debug = TULIP_DEBUG;
130int tulip_debug = 1; 130int tulip_debug = 1;
131#endif 131#endif
132 132
133static void tulip_timer(unsigned long data)
134{
135 struct net_device *dev = (struct net_device *)data;
136 struct tulip_private *tp = netdev_priv(dev);
133 137
138 if (netif_running(dev))
139 schedule_work(&tp->media_work);
140}
134 141
135/* 142/*
136 * This table is used during operation for capabilities and media timer. 143 * This table is used during operation for capabilities and media timer.
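This shim is the pivot of the whole tulip rework: media selection used to run from a kernel timer in softirq context, where sleeping and slow MII access are off limits; it now runs as a work item in process context, and the timer merely reschedules it. It also gives tulip_tx_timeout() (further down) a clean deferral path: set timeout_recovery, schedule the work, and let the media task restart the chip. The pattern, using the three-argument INIT_WORK() of this kernel generation:

    /* Timer callback: softirq context, so only kick the work item. */
    static void example_timer(unsigned long data)
    {
            struct net_device *dev = (struct net_device *)data;
            struct example_private *tp = netdev_priv(dev);  /* hypothetical */

            if (netif_running(dev))
                    schedule_work(&tp->media_work);
    }

    /* Work callback: process context; free to sleep, lock, touch MII. */
    static void example_media_task(void *data)
    {
            struct net_device *dev = data;
            /* ... media sensing, deferred timeout recovery ... */
    }

    /* Setup, e.g. at probe time: */
    INIT_WORK(&tp->media_work, example_media_task, dev);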
@@ -144,59 +151,60 @@ struct tulip_chip_table tulip_tbl[] = {
144 151
145 /* DC21140 */ 152 /* DC21140 */
146 { "Digital DS21140 Tulip", 128, 0x0001ebef, 153 { "Digital DS21140 Tulip", 128, 0x0001ebef,
147 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer }, 154 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
155 tulip_media_task },
148 156
149 /* DC21142, DC21143 */ 157 /* DC21142, DC21143 */
150 { "Digital DS21143 Tulip", 128, 0x0801fbff, 158 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
151 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY 159 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
152 | HAS_INTR_MITIGATION | HAS_PCI_MWI, t21142_timer }, 160 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
153 161
154 /* LC82C168 */ 162 /* LC82C168 */
155 { "Lite-On 82c168 PNIC", 256, 0x0001fbef, 163 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
156 HAS_MII | HAS_PNICNWAY, pnic_timer }, 164 HAS_MII | HAS_PNICNWAY, pnic_timer, },
157 165
158 /* MX98713 */ 166 /* MX98713 */
159 { "Macronix 98713 PMAC", 128, 0x0001ebef, 167 { "Macronix 98713 PMAC", 128, 0x0001ebef,
160 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer }, 168 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
161 169
162 /* MX98715 */ 170 /* MX98715 */
163 { "Macronix 98715 PMAC", 256, 0x0001ebef, 171 { "Macronix 98715 PMAC", 256, 0x0001ebef,
164 HAS_MEDIA_TABLE, mxic_timer }, 172 HAS_MEDIA_TABLE, mxic_timer, },
165 173
166 /* MX98725 */ 174 /* MX98725 */
167 { "Macronix 98725 PMAC", 256, 0x0001ebef, 175 { "Macronix 98725 PMAC", 256, 0x0001ebef,
168 HAS_MEDIA_TABLE, mxic_timer }, 176 HAS_MEDIA_TABLE, mxic_timer, },
169 177
170 /* AX88140 */ 178 /* AX88140 */
171 { "ASIX AX88140", 128, 0x0001fbff, 179 { "ASIX AX88140", 128, 0x0001fbff,
172 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY 180 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
173 | IS_ASIX, tulip_timer }, 181 | IS_ASIX, tulip_timer, tulip_media_task },
174 182
175 /* PNIC2 */ 183 /* PNIC2 */
176 { "Lite-On PNIC-II", 256, 0x0801fbff, 184 { "Lite-On PNIC-II", 256, 0x0801fbff,
177 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer }, 185 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
178 186
179 /* COMET */ 187 /* COMET */
180 { "ADMtek Comet", 256, 0x0001abef, 188 { "ADMtek Comet", 256, 0x0001abef,
181 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer }, 189 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
182 190
183 /* COMPEX9881 */ 191 /* COMPEX9881 */
184 { "Compex 9881 PMAC", 128, 0x0001ebef, 192 { "Compex 9881 PMAC", 128, 0x0001ebef,
185 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer }, 193 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
186 194
187 /* I21145 */ 195 /* I21145 */
188 { "Intel DS21145 Tulip", 128, 0x0801fbff, 196 { "Intel DS21145 Tulip", 128, 0x0801fbff,
189 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI 197 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
190 | HAS_NWAY | HAS_PCI_MWI, t21142_timer }, 198 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
191 199
192 /* DM910X */ 200 /* DM910X */
193 { "Davicom DM9102/DM9102A", 128, 0x0001ebef, 201 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
194 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, 202 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
195 tulip_timer }, 203 tulip_timer, tulip_media_task },
196 204
197 /* RS7112 */ 205 /* RS7112 */
198 { "Conexant LANfinity", 256, 0x0001ebef, 206 { "Conexant LANfinity", 256, 0x0001ebef,
199 HAS_MII | HAS_ACPI, tulip_timer }, 207 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
200 208
201}; 209};
202 210
@@ -295,12 +303,14 @@ static void tulip_up(struct net_device *dev)
295 303
296 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */ 304 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
297 iowrite32(0x00000001, ioaddr + CSR0); 305 iowrite32(0x00000001, ioaddr + CSR0);
306 pci_read_config_dword(tp->pdev, PCI_COMMAND, &i); /* flush write */
298 udelay(100); 307 udelay(100);
299 308
300 /* Deassert reset. 309 /* Deassert reset.
301 Wait the specified 50 PCI cycles after a reset by initializing 310 Wait the specified 50 PCI cycles after a reset by initializing
302 Tx and Rx queues and the address filter list. */ 311 Tx and Rx queues and the address filter list. */
303 iowrite32(tp->csr0, ioaddr + CSR0); 312 iowrite32(tp->csr0, ioaddr + CSR0);
313 pci_read_config_dword(tp->pdev, PCI_COMMAND, &i); /* flush write */
304 udelay(100); 314 udelay(100);
305 315
306 if (tulip_debug > 1) 316 if (tulip_debug > 1)
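The two added pci_read_config_dword() calls flush PCI posted writes: an iowrite32() can sit buffered in a bridge, so without a read on the same path the udelay() may expire before the reset bit ever reaches CSR0. Any read from the device serializes this; a register read-back is the more common spelling of the same idiom:

    /* Sketch: force a posted MMIO write out before timing against it.
     * Reading any register on the device (or, as the patch does, a
     * config-space dword) blocks until the preceding write completes. */
    static inline void write_then_flush(void __iomem *ioaddr, u32 val)
    {
            iowrite32(val, ioaddr);        /* may linger in a bridge buffer */
            (void) ioread32(ioaddr);       /* read forces it to the device */
    }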
@@ -522,20 +532,9 @@ static void tulip_tx_timeout(struct net_device *dev)
522 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n", 532 "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
523 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), 533 dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
524 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15)); 534 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
525 if ( ! tp->medialock && tp->mtable) { 535 tp->timeout_recovery = 1;
526 do 536 schedule_work(&tp->media_work);
527 --tp->cur_index; 537 goto out_unlock;
528 while (tp->cur_index >= 0
529 && (tulip_media_cap[tp->mtable->mleaf[tp->cur_index].media]
530 & MediaIsFD));
531 if (--tp->cur_index < 0) {
532 /* We start again, but should instead look for default. */
533 tp->cur_index = tp->mtable->leafcount - 1;
534 }
535 tulip_select_media(dev, 0);
536 printk(KERN_WARNING "%s: transmit timed out, switching to %s "
537 "media.\n", dev->name, medianame[dev->if_port]);
538 }
539 } else if (tp->chip_id == PNIC2) { 538 } else if (tp->chip_id == PNIC2) {
540 printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, " 539 printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
541 "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n", 540 "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
@@ -575,14 +574,9 @@ static void tulip_tx_timeout(struct net_device *dev)
575 } 574 }
576#endif 575#endif
577 576
578 /* Stop and restart the chip's Tx processes . */ 577 tulip_tx_timeout_complete(tp, ioaddr);
579
580 tulip_restart_rxtx(tp);
581 /* Trigger an immediate transmit demand. */
582 iowrite32(0, ioaddr + CSR1);
583
584 tp->stats.tx_errors++;
585 578
579out_unlock:
586 spin_unlock_irqrestore (&tp->lock, flags); 580 spin_unlock_irqrestore (&tp->lock, flags);
587 dev->trans_start = jiffies; 581 dev->trans_start = jiffies;
588 netif_wake_queue (dev); 582 netif_wake_queue (dev);
@@ -732,6 +726,8 @@ static void tulip_down (struct net_device *dev)
732 void __iomem *ioaddr = tp->base_addr; 726 void __iomem *ioaddr = tp->base_addr;
733 unsigned long flags; 727 unsigned long flags;
734 728
729 flush_scheduled_work();
730
735 del_timer_sync (&tp->timer); 731 del_timer_sync (&tp->timer);
736#ifdef CONFIG_TULIP_NAPI 732#ifdef CONFIG_TULIP_NAPI
737 del_timer_sync (&tp->oom_timer); 733 del_timer_sync (&tp->oom_timer);
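The flush_scheduled_work() added to tulip_down() keeps the new media_work from racing teardown: queued work is waited out before the rest of shutdown runs, and the work is only scheduled when netif_running() holds, so it cannot requeue once the interface is going down. The shutdown order, sketched with the my_priv names from above:

    static void my_down(struct net_device *dev)
    {
            struct my_priv *priv = netdev_priv(dev);

            flush_scheduled_work();          /* wait out queued media work */
            del_timer_sync(&priv->timer);    /* then silence the timer */
            /* ... mask interrupts, stop DMA, free rings ... */
    }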
@@ -1023,8 +1019,6 @@ static void set_rx_mode(struct net_device *dev)
1023 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1019 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1024 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys; 1020 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1025 csr6 |= AcceptAllMulticast | AcceptAllPhys; 1021 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1026 /* Unconditionally log net taps. */
1027 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1028 } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) { 1022 } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1029 /* Too many to filter well -- accept all multicasts. */ 1023 /* Too many to filter well -- accept all multicasts. */
1030 tp->csr6 |= AcceptAllMulticast; 1024 tp->csr6 |= AcceptAllMulticast;
@@ -1361,11 +1355,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1361 if (pci_request_regions (pdev, "tulip")) 1355 if (pci_request_regions (pdev, "tulip"))
1362 goto err_out_free_netdev; 1356 goto err_out_free_netdev;
1363 1357
1364#ifndef USE_IO_OPS 1358 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1365 ioaddr = pci_iomap(pdev, 1, tulip_tbl[chip_idx].io_size); 1359
1366#else
1367 ioaddr = pci_iomap(pdev, 0, tulip_tbl[chip_idx].io_size);
1368#endif
1369 if (!ioaddr) 1360 if (!ioaddr)
1370 goto err_out_free_res; 1361 goto err_out_free_res;
1371 1362
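pci_iomap() returns a cookie that ioread32()/iowrite32() decode at run time, so one code path covers both the port-I/O BAR and the MMIO BAR, and the USE_IO_OPS #ifdef collapses into a single TULIP_BAR constant (presumably chosen by CONFIG_TULIP_MMIO in tulip.h). A probe-time sketch:

    #include <linux/pci.h>

    /* TULIP_BAR stands in for the 0-or-1 constant from tulip.h. */
    static void __iomem *map_regs(struct pci_dev *pdev, unsigned long size)
    {
            void __iomem *ioaddr;

            if (pci_request_regions(pdev, "example"))
                    return NULL;

            ioaddr = pci_iomap(pdev, TULIP_BAR, size);  /* I/O or MMIO */
            if (!ioaddr)
                    pci_release_regions(pdev);
            return ioaddr;
    }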
@@ -1398,6 +1389,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1398 tp->timer.data = (unsigned long)dev; 1389 tp->timer.data = (unsigned long)dev;
1399 tp->timer.function = tulip_tbl[tp->chip_id].media_timer; 1390 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1400 1391
1392 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task, dev);
1393
1401 dev->base_addr = (unsigned long)ioaddr; 1394 dev->base_addr = (unsigned long)ioaddr;
1402 1395
1403#ifdef CONFIG_TULIP_MWI 1396#ifdef CONFIG_TULIP_MWI
@@ -1644,8 +1637,14 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1644 if (register_netdev(dev)) 1637 if (register_netdev(dev))
1645 goto err_out_free_ring; 1638 goto err_out_free_ring;
1646 1639
1647 printk(KERN_INFO "%s: %s rev %d at %p,", 1640 printk(KERN_INFO "%s: %s rev %d at "
1648 dev->name, chip_name, chip_rev, ioaddr); 1641#ifdef CONFIG_TULIP_MMIO
1642 "MMIO"
1643#else
1644 "Port"
1645#endif
1646 " %#llx,", dev->name, chip_name, chip_rev,
1647 (unsigned long long) pci_resource_start(pdev, TULIP_BAR));
1649 pci_set_drvdata(pdev, dev); 1648 pci_set_drvdata(pdev, dev);
1650 1649
1651 if (eeprom_missing) 1650 if (eeprom_missing)
@@ -1768,7 +1767,10 @@ static int tulip_resume(struct pci_dev *pdev)
1768 pci_set_power_state(pdev, PCI_D0); 1767 pci_set_power_state(pdev, PCI_D0);
1769 pci_restore_state(pdev); 1768 pci_restore_state(pdev);
1770 1769
1771 pci_enable_device(pdev); 1770 if ((retval = pci_enable_device(pdev))) {
1771 printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
1772 return retval;
1773 }
1772 1774
1773 if ((retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev))) { 1775 if ((retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1774 printk (KERN_ERR "tulip: request_irq failed in resume\n"); 1776 printk (KERN_ERR "tulip: request_irq failed in resume\n");
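pci_enable_device() can legitimately fail on resume, and the old code ignored the return value and went on to touch a device that was never re-enabled. The fix mirrors the error handling that request_irq() already had; in sketch form:

    static int my_resume(struct pci_dev *pdev)
    {
            int err;

            pci_set_power_state(pdev, PCI_D0);
            pci_restore_state(pdev);

            err = pci_enable_device(pdev);
            if (err) {
                    dev_err(&pdev->dev, "enable failed in resume\n");
                    return err;       /* leave the device untouched */
            }
            /* ... request_irq(), reinit rings, netif_device_attach() ... */
            return 0;
    }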
@@ -1849,7 +1851,7 @@ static int __init tulip_init (void)
1849 tulip_max_interrupt_work = max_interrupt_work; 1851 tulip_max_interrupt_work = max_interrupt_work;
1850 1852
1851 /* probe for and init boards */ 1853 /* probe for and init boards */
1852 return pci_module_init (&tulip_driver); 1854 return pci_register_driver(&tulip_driver);
1853} 1855}
1854 1856
1855 1857
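pci_module_init() was a 2.4-era compatibility macro around pci_register_driver(), which already returns 0 or a negative errno. That is why this and the uli526x, xircom and typhoon conversions below can drop their rc plumbing and return the call directly; the whole module boilerplate reduces to:

    #include <linux/module.h>
    #include <linux/pci.h>

    static struct pci_driver example_driver; /* id_table/probe/remove elided */

    static int __init example_init(void)
    {
            /* 0 on success, negative errno on failure; nothing to translate */
            return pci_register_driver(&example_driver);
    }

    static void __exit example_exit(void)
    {
            pci_unregister_driver(&example_driver);
    }

    module_init(example_init);
    module_exit(example_exit);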
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fd64b2b3e99c..c4c720e2d4c3 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1702,7 +1702,6 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
1702 1702
1703static int __init uli526x_init_module(void) 1703static int __init uli526x_init_module(void)
1704{ 1704{
1705 int rc;
1706 1705
1707 printk(version); 1706 printk(version);
1708 printed_version = 1; 1707 printed_version = 1;
@@ -1714,22 +1713,19 @@ static int __init uli526x_init_module(void)
1714 if (cr6set) 1713 if (cr6set)
1715 uli526x_cr6_user_set = cr6set; 1714 uli526x_cr6_user_set = cr6set;
1716 1715
1717 switch(mode) { 1716 switch (mode) {
1718 case ULI526X_10MHF: 1717 case ULI526X_10MHF:
1719 case ULI526X_100MHF: 1718 case ULI526X_100MHF:
1720 case ULI526X_10MFD: 1719 case ULI526X_10MFD:
1721 case ULI526X_100MFD: 1720 case ULI526X_100MFD:
1722 uli526x_media_mode = mode; 1721 uli526x_media_mode = mode;
1723 break; 1722 break;
1724 default:uli526x_media_mode = ULI526X_AUTO; 1723 default:
1724 uli526x_media_mode = ULI526X_AUTO;
1725 break; 1725 break;
1726 } 1726 }
1727 1727
1728 rc = pci_module_init(&uli526x_driver); 1728 return pci_register_driver(&uli526x_driver);
1729 if (rc < 0)
1730 return rc;
1731
1732 return 0;
1733} 1729}
1734 1730
1735 1731
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 7f414815cc62..0e5344fe7e26 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -45,8 +45,8 @@
45*/ 45*/
46 46
47#define DRV_NAME "winbond-840" 47#define DRV_NAME "winbond-840"
48#define DRV_VERSION "1.01-d" 48#define DRV_VERSION "1.01-e"
49#define DRV_RELDATE "Nov-17-2001" 49#define DRV_RELDATE "Sep-11-2006"
50 50
51 51
52/* Automatically extracted configuration info: 52/* Automatically extracted configuration info:
@@ -90,10 +90,8 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
90 Making the Tx ring too large decreases the effectiveness of channel 90 Making the Tx ring too large decreases the effectiveness of channel
91 bonding and packet priority. 91 bonding and packet priority.
92 There are no ill effects from too-large receive rings. */ 92 There are no ill effects from too-large receive rings. */
93#define TX_RING_SIZE 16
94#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ 93#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
95#define TX_QUEUE_LEN_RESTART 5 94#define TX_QUEUE_LEN_RESTART 5
96#define RX_RING_SIZE 32
97 95
98#define TX_BUFLIMIT (1024-128) 96#define TX_BUFLIMIT (1024-128)
99 97
@@ -137,8 +135,10 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
137#include <asm/io.h> 135#include <asm/io.h>
138#include <asm/irq.h> 136#include <asm/irq.h>
139 137
138#include "tulip.h"
139
140/* These identify the driver base version and may not be removed. */ 140/* These identify the driver base version and may not be removed. */
141static char version[] __devinitdata = 141static char version[] =
142KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" 142KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
143KERN_INFO " http://www.scyld.com/network/drivers.html\n"; 143KERN_INFO " http://www.scyld.com/network/drivers.html\n";
144 144
@@ -242,8 +242,8 @@ static const struct pci_id_info pci_id_tbl[] __devinitdata = {
242}; 242};
243 243
244/* This driver was written to use PCI memory space, however some x86 systems 244/* This driver was written to use PCI memory space, however some x86 systems
245 work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space 245 work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
246 accesses instead of memory space. */ 246*/
247 247
248/* Offsets to the Command and Status Registers, "CSRs". 248/* Offsets to the Command and Status Registers, "CSRs".
249 While similar to the Tulip, these registers are longword aligned. 249 While similar to the Tulip, these registers are longword aligned.
@@ -261,21 +261,11 @@ enum w840_offsets {
261 CurTxDescAddr=0x4C, CurTxBufAddr=0x50, 261 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
262}; 262};
263 263
264/* Bits in the interrupt status/enable registers. */
265/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
266enum intr_status_bits {
267 NormalIntr=0x10000, AbnormalIntr=0x8000,
268 IntrPCIErr=0x2000, TimerInt=0x800,
269 IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
270 TxFIFOUnderflow=0x20, RxErrIntr=0x10,
271 TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
272};
273
274/* Bits in the NetworkConfig register. */ 264/* Bits in the NetworkConfig register. */
275enum rx_mode_bits { 265enum rx_mode_bits {
276 AcceptErr=0x80, AcceptRunt=0x40, 266 AcceptErr=0x80,
277 AcceptBroadcast=0x20, AcceptMulticast=0x10, 267 RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
278 AcceptAllPhys=0x08, AcceptMyPhys=0x02, 268 RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
279}; 269};
280 270
281enum mii_reg_bits { 271enum mii_reg_bits {
@@ -297,13 +287,6 @@ struct w840_tx_desc {
297 u32 buffer1, buffer2; 287 u32 buffer1, buffer2;
298}; 288};
299 289
300/* Bits in network_desc.status */
301enum desc_status_bits {
302 DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
303 DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
304 DescIntr=0x80000000,
305};
306
307#define MII_CNT 1 /* winbond only supports one MII */ 290#define MII_CNT 1 /* winbond only supports one MII */
308struct netdev_private { 291struct netdev_private {
309 struct w840_rx_desc *rx_ring; 292 struct w840_rx_desc *rx_ring;
@@ -371,7 +354,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
371 int irq; 354 int irq;
372 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; 355 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
373 void __iomem *ioaddr; 356 void __iomem *ioaddr;
374 int bar = 1;
375 357
376 i = pci_enable_device(pdev); 358 i = pci_enable_device(pdev);
377 if (i) return i; 359 if (i) return i;
@@ -393,10 +375,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
393 375
394 if (pci_request_regions(pdev, DRV_NAME)) 376 if (pci_request_regions(pdev, DRV_NAME))
395 goto err_out_netdev; 377 goto err_out_netdev;
396#ifdef USE_IO_OPS 378
397 bar = 0; 379 ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
398#endif
399 ioaddr = pci_iomap(pdev, bar, netdev_res_size);
400 if (!ioaddr) 380 if (!ioaddr)
401 goto err_out_free_res; 381 goto err_out_free_res;
402 382
@@ -838,7 +818,7 @@ static void init_rxtx_rings(struct net_device *dev)
838 np->rx_buf_sz,PCI_DMA_FROMDEVICE); 818 np->rx_buf_sz,PCI_DMA_FROMDEVICE);
839 819
840 np->rx_ring[i].buffer1 = np->rx_addr[i]; 820 np->rx_ring[i].buffer1 = np->rx_addr[i];
841 np->rx_ring[i].status = DescOwn; 821 np->rx_ring[i].status = DescOwned;
842 } 822 }
843 823
844 np->cur_rx = 0; 824 np->cur_rx = 0;
@@ -923,7 +903,7 @@ static void init_registers(struct net_device *dev)
923 } 903 }
924#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) 904#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
925 i |= 0xE000; 905 i |= 0xE000;
926#elif defined(__sparc__) 906#elif defined(__sparc__) || defined (CONFIG_PARISC)
927 i |= 0x4800; 907 i |= 0x4800;
928#else 908#else
929#warning Processor architecture undefined 909#warning Processor architecture undefined
@@ -1043,11 +1023,11 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1043 1023
1044 /* Now acquire the irq spinlock. 1024 /* Now acquire the irq spinlock.
1045 * The difficult race is the ordering between 1025 * The difficult race is the ordering between
1046 * increasing np->cur_tx and setting DescOwn: 1026 * increasing np->cur_tx and setting DescOwned:
1047 * - if np->cur_tx is increased first the interrupt 1027 * - if np->cur_tx is increased first the interrupt
1048 * handler could consider the packet as transmitted 1028 * handler could consider the packet as transmitted
1049 * since DescOwn is cleared. 1029 * since DescOwned is cleared.
1050 * - If DescOwn is set first the NIC could report the 1030 * - If DescOwned is set first the NIC could report the
1051 * packet as sent, but the interrupt handler would ignore it 1031 * packet as sent, but the interrupt handler would ignore it
1052 * since the np->cur_tx was not yet increased. 1032 * since the np->cur_tx was not yet increased.
1053 */ 1033 */
@@ -1055,7 +1035,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
1055 np->cur_tx++; 1035 np->cur_tx++;
1056 1036
1057 wmb(); /* flush length, buffer1, buffer2 */ 1037 wmb(); /* flush length, buffer1, buffer2 */
1058 np->tx_ring[entry].status = DescOwn; 1038 np->tx_ring[entry].status = DescOwned;
1059 wmb(); /* flush status and kick the hardware */ 1039 wmb(); /* flush status and kick the hardware */
1060 iowrite32(0, np->base_addr + TxStartDemand); 1040 iowrite32(0, np->base_addr + TxStartDemand);
1061 np->tx_q_bytes += skb->len; 1041 np->tx_q_bytes += skb->len;
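The two wmb() calls bracket the one store that matters: the NIC may DMA the descriptor the moment the ownership bit flips, so length and buffer pointers must be globally visible first, and the doorbell write must not be reordered ahead of the status store. A generic producer sketch, assuming a DMA-coherent ring and a hypothetical DESC_OWN bit:

    struct ring_desc { u32 status, length, buffer1, buffer2; };
    #define DESC_OWN 0x80000000           /* hardware owns the descriptor */

    static void submit_desc(struct ring_desc *desc, u32 buf, u32 len,
                            void __iomem *doorbell)
    {
            desc->buffer1 = buf;
            desc->length  = len;
            wmb();                  /* fields visible before ownership flips */
            desc->status = DESC_OWN;
            wmb();                  /* ownership visible before the doorbell */
            iowrite32(0, doorbell); /* tell the NIC to rescan the ring */
    }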
@@ -1155,12 +1135,12 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
1155 1135
1156 handled = 1; 1136 handled = 1;
1157 1137
1158 if (intr_status & (IntrRxDone | RxNoBuf)) 1138 if (intr_status & (RxIntr | RxNoBuf))
1159 netdev_rx(dev); 1139 netdev_rx(dev);
1160 if (intr_status & RxNoBuf) 1140 if (intr_status & RxNoBuf)
1161 iowrite32(0, ioaddr + RxStartDemand); 1141 iowrite32(0, ioaddr + RxStartDemand);
1162 1142
1163 if (intr_status & (TxIdle | IntrTxDone) && 1143 if (intr_status & (TxNoBuf | TxIntr) &&
1164 np->cur_tx != np->dirty_tx) { 1144 np->cur_tx != np->dirty_tx) {
1165 spin_lock(&np->lock); 1145 spin_lock(&np->lock);
1166 netdev_tx_done(dev); 1146 netdev_tx_done(dev);
@@ -1168,8 +1148,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs
1168 } 1148 }
1169 1149
1170 /* Abnormal error summary/uncommon events handlers. */ 1150 /* Abnormal error summary/uncommon events handlers. */
1171 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr | 1151 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SytemError |
1172 TimerInt | IntrTxStopped)) 1152 TimerInt | TxDied))
1173 netdev_error(dev, intr_status); 1153 netdev_error(dev, intr_status);
1174 1154
1175 if (--work_limit < 0) { 1155 if (--work_limit < 0) {
@@ -1305,7 +1285,7 @@ static int netdev_rx(struct net_device *dev)
1305 np->rx_ring[entry].buffer1 = np->rx_addr[entry]; 1285 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1306 } 1286 }
1307 wmb(); 1287 wmb();
1308 np->rx_ring[entry].status = DescOwn; 1288 np->rx_ring[entry].status = DescOwned;
1309 } 1289 }
1310 1290
1311 return 0; 1291 return 0;
@@ -1342,7 +1322,7 @@ static void netdev_error(struct net_device *dev, int intr_status)
1342 dev->name, new); 1322 dev->name, new);
1343 update_csr6(dev, new); 1323 update_csr6(dev, new);
1344 } 1324 }
1345 if (intr_status & IntrRxDied) { /* Missed a Rx frame. */ 1325 if (intr_status & RxDied) { /* Missed a Rx frame. */
1346 np->stats.rx_errors++; 1326 np->stats.rx_errors++;
1347 } 1327 }
1348 if (intr_status & TimerInt) { 1328 if (intr_status & TimerInt) {
@@ -1378,16 +1358,14 @@ static u32 __set_rx_mode(struct net_device *dev)
1378 u32 rx_mode; 1358 u32 rx_mode;
1379 1359
1380 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1360 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1381 /* Unconditionally log net taps. */
1382 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1383 memset(mc_filter, 0xff, sizeof(mc_filter)); 1361 memset(mc_filter, 0xff, sizeof(mc_filter));
1384 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys 1362 rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1385 | AcceptMyPhys; 1363 | AcceptMyPhys;
1386 } else if ((dev->mc_count > multicast_filter_limit) 1364 } else if ((dev->mc_count > multicast_filter_limit)
1387 || (dev->flags & IFF_ALLMULTI)) { 1365 || (dev->flags & IFF_ALLMULTI)) {
1388 /* Too many to match, or accept all multicasts. */ 1366 /* Too many to match, or accept all multicasts. */
1389 memset(mc_filter, 0xff, sizeof(mc_filter)); 1367 memset(mc_filter, 0xff, sizeof(mc_filter));
1390 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1368 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1391 } else { 1369 } else {
1392 struct dev_mc_list *mclist; 1370 struct dev_mc_list *mclist;
1393 int i; 1371 int i;
@@ -1398,7 +1376,7 @@ static u32 __set_rx_mode(struct net_device *dev)
1398 filterbit &= 0x3f; 1376 filterbit &= 0x3f;
1399 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1377 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1400 } 1378 }
1401 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1379 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1402 } 1380 }
1403 iowrite32(mc_filter[0], ioaddr + MulticastFilter0); 1381 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1404 iowrite32(mc_filter[1], ioaddr + MulticastFilter1); 1382 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
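The two MulticastFilter registers form one 64-bit hash table: each address is reduced to a 6-bit index, whose split into word and bit is exactly the mc_filter[filterbit >> 5] |= 1 << (filterbit & 31) line above. The index comes from an Ethernet CRC-32 of the address (the filterbit derivation itself sits outside this hunk); a self-contained illustration, assuming the high six bits of the CRC are used:

    #include <stdint.h>
    #include <stdio.h>

    /* Bit-serial CRC-32, MSB first, equivalent to the kernel's ether_crc(). */
    static uint32_t ether_crc(int len, const uint8_t *data)
    {
            uint32_t crc = 0xffffffff;
            int i, bit;

            for (i = 0; i < len; i++) {
                    uint8_t octet = data[i];
                    for (bit = 0; bit < 8; bit++, octet >>= 1)
                            crc = (crc << 1) ^
                                  ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7 : 0);
            }
            return crc;
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
            uint32_t mc_filter[2] = { 0, 0 };
            int filterbit = (ether_crc(6, mac) >> 26) & 0x3f; /* assumed */

            mc_filter[filterbit >> 5] |= 1u << (filterbit & 31);
            printf("bit %d -> %08x %08x\n", filterbit,
                   mc_filter[0], mc_filter[1]);
            return 0;
    }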
@@ -1646,14 +1624,18 @@ static int w840_resume (struct pci_dev *pdev)
1646{ 1624{
1647 struct net_device *dev = pci_get_drvdata (pdev); 1625 struct net_device *dev = pci_get_drvdata (pdev);
1648 struct netdev_private *np = netdev_priv(dev); 1626 struct netdev_private *np = netdev_priv(dev);
1627 int retval = 0;
1649 1628
1650 rtnl_lock(); 1629 rtnl_lock();
1651 if (netif_device_present(dev)) 1630 if (netif_device_present(dev))
1652 goto out; /* device not suspended */ 1631 goto out; /* device not suspended */
1653 if (netif_running(dev)) { 1632 if (netif_running(dev)) {
1654 pci_enable_device(pdev); 1633 if ((retval = pci_enable_device(pdev))) {
1655 /* pci_power_on(pdev); */ 1634 printk (KERN_ERR
1656 1635 "%s: pci_enable_device failed in resume\n",
1636 dev->name);
1637 goto out;
1638 }
1657 spin_lock_irq(&np->lock); 1639 spin_lock_irq(&np->lock);
1658 iowrite32(1, np->base_addr+PCIBusCfg); 1640 iowrite32(1, np->base_addr+PCIBusCfg);
1659 ioread32(np->base_addr+PCIBusCfg); 1641 ioread32(np->base_addr+PCIBusCfg);
@@ -1671,7 +1653,7 @@ static int w840_resume (struct pci_dev *pdev)
1671 } 1653 }
1672out: 1654out:
1673 rtnl_unlock(); 1655 rtnl_unlock();
1674 return 0; 1656 return retval;
1675} 1657}
1676#endif 1658#endif
1677 1659
@@ -1689,7 +1671,7 @@ static struct pci_driver w840_driver = {
1689static int __init w840_init(void) 1671static int __init w840_init(void)
1690{ 1672{
1691 printk(version); 1673 printk(version);
1692 return pci_module_init(&w840_driver); 1674 return pci_register_driver(&w840_driver);
1693} 1675}
1694 1676
1695static void __exit w840_exit(void) 1677static void __exit w840_exit(void)
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index f874e4f6ccf6..cf43390d2c80 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
1264 1264
1265static int __init xircom_init(void) 1265static int __init xircom_init(void)
1266{ 1266{
1267 pci_register_driver(&xircom_ops); 1267 return pci_register_driver(&xircom_ops);
1268 return 0;
1269} 1268}
1270 1269
1271static void __exit xircom_exit(void) 1270static void __exit xircom_exit(void)
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index 17ca7dc42e6f..d797b7b2e35f 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -1707,7 +1707,7 @@ static int __init xircom_init(void)
1707#ifdef MODULE 1707#ifdef MODULE
1708 printk(version); 1708 printk(version);
1709#endif 1709#endif
1710 return pci_module_init(&xircom_driver); 1710 return pci_register_driver(&xircom_driver);
1711} 1711}
1712 1712
1713 1713
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 4103c37172f9..1084180205a3 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -100,8 +100,8 @@ static const int multicast_filter_limit = 32;
100#define PKT_BUF_SZ 1536 100#define PKT_BUF_SZ 1536
101 101
102#define DRV_MODULE_NAME "typhoon" 102#define DRV_MODULE_NAME "typhoon"
103#define DRV_MODULE_VERSION "1.5.7" 103#define DRV_MODULE_VERSION "1.5.8"
104#define DRV_MODULE_RELDATE "05/01/07" 104#define DRV_MODULE_RELDATE "06/11/09"
105#define PFX DRV_MODULE_NAME ": " 105#define PFX DRV_MODULE_NAME ": "
106#define ERR_PFX KERN_ERR PFX 106#define ERR_PFX KERN_ERR PFX
107 107
@@ -937,8 +937,6 @@ typhoon_set_rx_mode(struct net_device *dev)
937 937
938 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST; 938 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
939 if(dev->flags & IFF_PROMISC) { 939 if(dev->flags & IFF_PROMISC) {
940 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
941 dev->name);
942 filter |= TYPHOON_RX_FILTER_PROMISCOUS; 940 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
943 } else if((dev->mc_count > multicast_filter_limit) || 941 } else if((dev->mc_count > multicast_filter_limit) ||
944 (dev->flags & IFF_ALLMULTI)) { 942 (dev->flags & IFF_ALLMULTI)) {
@@ -2660,7 +2658,7 @@ static struct pci_driver typhoon_driver = {
2660static int __init 2658static int __init
2661typhoon_init(void) 2659typhoon_init(void)
2662{ 2660{
2663 return pci_module_init(&typhoon_driver); 2661 return pci_register_driver(&typhoon_driver);
2664} 2662}
2665 2663
2666static void __exit 2664static void __exit
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
new file mode 100644
index 000000000000..4e188f4289b4
--- /dev/null
+++ b/drivers/net/ucc_geth.c
@@ -0,0 +1,4276 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * QE UCC Gigabit Ethernet Driver
8 *
9 * Changelog:
10 * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/slab.h>
22#include <linux/stddef.h>
23#include <linux/interrupt.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mm.h>
29#include <linux/ethtool.h>
30#include <linux/delay.h>
31#include <linux/dma-mapping.h>
32#include <linux/fsl_devices.h>
33#include <linux/ethtool.h>
34#include <linux/platform_device.h>
35#include <linux/mii.h>
36
37#include <asm/uaccess.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/immap_qe.h>
41#include <asm/qe.h>
42#include <asm/ucc.h>
43#include <asm/ucc_fast.h>
44
45#include "ucc_geth.h"
46#include "ucc_geth_phy.h"
47
48#undef DEBUG
49
50#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:Sept 11, 2006"
51#define DRV_NAME "ucc_geth"
52
53#define ugeth_printk(level, format, arg...) \
54 printk(level format "\n", ## arg)
55
56#define ugeth_dbg(format, arg...) \
57 ugeth_printk(KERN_DEBUG , format , ## arg)
58#define ugeth_err(format, arg...) \
59 ugeth_printk(KERN_ERR , format , ## arg)
60#define ugeth_info(format, arg...) \
61 ugeth_printk(KERN_INFO , format , ## arg)
62#define ugeth_warn(format, arg...) \
63 ugeth_printk(KERN_WARNING , format , ## arg)
64
65#ifdef UGETH_VERBOSE_DEBUG
66#define ugeth_vdbg ugeth_dbg
67#else
68#define ugeth_vdbg(fmt, args...) do { } while (0)
69#endif /* UGETH_VERBOSE_DEBUG */
70
71static DEFINE_SPINLOCK(ugeth_lock);
72
73static ucc_geth_info_t ugeth_primary_info = {
74 .uf_info = {
75 .bd_mem_part = MEM_PART_SYSTEM,
76 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
77 .max_rx_buf_length = 1536,
78/* FIXME: should be changed at run time for 1G and 100M */
79#ifdef CONFIG_UGETH_HAS_GIGA
80 .urfs = UCC_GETH_URFS_GIGA_INIT,
81 .urfet = UCC_GETH_URFET_GIGA_INIT,
82 .urfset = UCC_GETH_URFSET_GIGA_INIT,
83 .utfs = UCC_GETH_UTFS_GIGA_INIT,
84 .utfet = UCC_GETH_UTFET_GIGA_INIT,
85 .utftt = UCC_GETH_UTFTT_GIGA_INIT,
86#else
87 .urfs = UCC_GETH_URFS_INIT,
88 .urfet = UCC_GETH_URFET_INIT,
89 .urfset = UCC_GETH_URFSET_INIT,
90 .utfs = UCC_GETH_UTFS_INIT,
91 .utfet = UCC_GETH_UTFET_INIT,
92 .utftt = UCC_GETH_UTFTT_INIT,
93#endif
94 .ufpt = 256,
95 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
96 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
97 .tenc = UCC_FAST_TX_ENCODING_NRZ,
98 .renc = UCC_FAST_RX_ENCODING_NRZ,
99 .tcrc = UCC_FAST_16_BIT_CRC,
100 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
101 },
102 .numQueuesTx = 1,
103 .numQueuesRx = 1,
104 .extendedFilteringChainPointer = ((uint32_t) NULL),
105 .typeorlen = 3072 /*1536 */ ,
106 .nonBackToBackIfgPart1 = 0x40,
107 .nonBackToBackIfgPart2 = 0x60,
108 .miminumInterFrameGapEnforcement = 0x50,
109 .backToBackInterFrameGap = 0x60,
110 .mblinterval = 128,
111 .nortsrbytetime = 5,
112 .fracsiz = 1,
113 .strictpriorityq = 0xff,
114 .altBebTruncation = 0xa,
115 .excessDefer = 1,
116 .maxRetransmission = 0xf,
117 .collisionWindow = 0x37,
118 .receiveFlowControl = 1,
119 .maxGroupAddrInHash = 4,
120 .maxIndAddrInHash = 4,
121 .prel = 7,
122 .maxFrameLength = 1518,
123 .minFrameLength = 64,
124 .maxD1Length = 1520,
125 .maxD2Length = 1520,
126 .vlantype = 0x8100,
127 .ecamptr = ((uint32_t) NULL),
128 .eventRegMask = UCCE_OTHER,
129 .pausePeriod = 0xf000,
130 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
131 .bdRingLenTx = {
132 TX_BD_RING_LEN,
133 TX_BD_RING_LEN,
134 TX_BD_RING_LEN,
135 TX_BD_RING_LEN,
136 TX_BD_RING_LEN,
137 TX_BD_RING_LEN,
138 TX_BD_RING_LEN,
139 TX_BD_RING_LEN},
140
141 .bdRingLenRx = {
142 RX_BD_RING_LEN,
143 RX_BD_RING_LEN,
144 RX_BD_RING_LEN,
145 RX_BD_RING_LEN,
146 RX_BD_RING_LEN,
147 RX_BD_RING_LEN,
148 RX_BD_RING_LEN,
149 RX_BD_RING_LEN},
150
151 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
152 .largestexternallookupkeysize =
153 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
154 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
155 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
156 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
157 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
158 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
159 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
160 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
161 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
162 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
163 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
164};
165
166static ucc_geth_info_t ugeth_info[8];
167
168#ifdef DEBUG
169static void mem_disp(u8 *addr, int size)
170{
171 u8 *i;
172 int size16Aling = (size >> 4) << 4;
173 int size4Aling = (size >> 2) << 2;
174 int notAlign = 0;
175 if (size % 16)
176 notAlign = 1;
177
178 for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
179 printk("0x%08x: %08x %08x %08x %08x\r\n",
180 (u32) i,
181 *((u32 *) (i)),
182 *((u32 *) (i + 4)),
183 *((u32 *) (i + 8)), *((u32 *) (i + 12)));
184 if (notAlign == 1)
185 printk("0x%08x: ", (u32) i);
186 for (; (u32) i < (u32) addr + size4Aling; i += 4)
187 printk("%08x ", *((u32 *) (i)));
188 for (; (u32) i < (u32) addr + size; i++)
189 printk("%02x", *((u8 *) (i)));
190 if (notAlign == 1)
191 printk("\r\n");
192}
193#endif /* DEBUG */
194
195#ifdef CONFIG_UGETH_FILTERING
196static void enqueue(struct list_head *node, struct list_head *lh)
197{
198 unsigned long flags;
199
200 spin_lock_irqsave(&ugeth_lock, flags);
201 list_add_tail(node, lh);
202 spin_unlock_irqrestore(&ugeth_lock, flags);
203}
204#endif /* CONFIG_UGETH_FILTERING */
205
206static struct list_head *dequeue(struct list_head *lh)
207{
208 unsigned long flags;
209
210 spin_lock_irqsave(&ugeth_lock, flags);
211 if (!list_empty(lh)) {
212 struct list_head *node = lh->next;
213 list_del(node);
214 spin_unlock_irqrestore(&ugeth_lock, flags);
215 return node;
216 } else {
217 spin_unlock_irqrestore(&ugeth_lock, flags);
218 return NULL;
219 }
220}
221
222static int get_interface_details(enet_interface_e enet_interface,
223 enet_speed_e *speed,
224 int *r10m,
225 int *rmm,
226 int *rpm,
227 int *tbi, int *limited_to_full_duplex)
228{
229 /* Analyze enet_interface according to Interface Mode
230 Configuration table */
231 switch (enet_interface) {
232 case ENET_10_MII:
233 *speed = ENET_SPEED_10BT;
234 break;
235 case ENET_10_RMII:
236 *speed = ENET_SPEED_10BT;
237 *r10m = 1;
238 *rmm = 1;
239 break;
240 case ENET_10_RGMII:
241 *speed = ENET_SPEED_10BT;
242 *rpm = 1;
243 *r10m = 1;
244 *limited_to_full_duplex = 1;
245 break;
246 case ENET_100_MII:
247 *speed = ENET_SPEED_100BT;
248 break;
249 case ENET_100_RMII:
250 *speed = ENET_SPEED_100BT;
251 *rmm = 1;
252 break;
253 case ENET_100_RGMII:
254 *speed = ENET_SPEED_100BT;
255 *rpm = 1;
256 *limited_to_full_duplex = 1;
257 break;
258 case ENET_1000_GMII:
259 *speed = ENET_SPEED_1000BT;
260 *limited_to_full_duplex = 1;
261 break;
262 case ENET_1000_RGMII:
263 *speed = ENET_SPEED_1000BT;
264 *rpm = 1;
265 *limited_to_full_duplex = 1;
266 break;
267 case ENET_1000_TBI:
268 *speed = ENET_SPEED_1000BT;
269 *tbi = 1;
270 *limited_to_full_duplex = 1;
271 break;
272 case ENET_1000_RTBI:
273 *speed = ENET_SPEED_1000BT;
274 *rpm = 1;
275 *tbi = 1;
276 *limited_to_full_duplex = 1;
277 break;
278 default:
279 return -EINVAL;
280 break;
281 }
282
283 return 0;
284}
285
286static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
287{
288 struct sk_buff *skb = NULL;
289
290 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
291 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
292
293 if (skb == NULL)
294 return NULL;
295
296 /* We need the data buffer to be aligned properly. We will reserve
297 * as many bytes as needed to align the data properly
298 */
299 skb_reserve(skb,
300 UCC_GETH_RX_DATA_BUF_ALIGNMENT -
301 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
302 1)));
303
304 skb->dev = ugeth->dev;
305
306 BD_BUFFER_SET(bd,
307 dma_map_single(NULL,
308 skb->data,
309 ugeth->ug_info->uf_info.max_rx_buf_length +
310 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
311 DMA_FROM_DEVICE));
312
313 BD_STATUS_AND_LENGTH_SET(bd,
314 (R_E | R_I |
315 (BD_STATUS_AND_LENGTH(bd) & R_W)));
316
317 return skb;
318}
319
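get_new_skb() over-allocates by UCC_GETH_RX_DATA_BUF_ALIGNMENT and then skb_reserve()s the difference so that skb->data lands on an aligned boundary; note that an already-aligned buffer still pays a full alignment of headroom, because the pad is computed as align minus the low bits rather than taken modulo align. The arithmetic in isolation (alignment must be a power of two):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Padding as the driver computes it: align - (addr & (align - 1)),
     * which is a full 'align' bytes when addr is already aligned. */
    static unsigned pad_to_align(uintptr_t addr, unsigned align)
    {
            assert((align & (align - 1)) == 0);   /* power of two */
            return align - (unsigned)(addr & (align - 1));
    }

    int main(void)
    {
            printf("%u\n", pad_to_align(0x1003, 64));  /* 61 */
            printf("%u\n", pad_to_align(0x1000, 64));  /* 64: the quirk */
            return 0;
    }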
320static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
321{
322 u8 *bd;
323 u32 bd_status;
324 struct sk_buff *skb;
325 int i;
326
327 bd = ugeth->p_rx_bd_ring[rxQ];
328 i = 0;
329
330 do {
331 bd_status = BD_STATUS_AND_LENGTH(bd);
332 skb = get_new_skb(ugeth, bd);
333
334 if (!skb) /* If we cannot allocate a data buffer,
335 abort; cleanup happens elsewhere */
336 return -ENOMEM;
337
338 ugeth->rx_skbuff[rxQ][i] = skb;
339
340 /* advance the BD pointer */
341 bd += UCC_GETH_SIZE_OF_BD;
342 i++;
343 } while (!(bd_status & R_W));
344
345 return 0;
346}
347
348static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
349 volatile u32 *p_start,
350 u8 num_entries,
351 u32 thread_size,
352 u32 thread_alignment,
353 qe_risc_allocation_e risc,
354 int skip_page_for_first_entry)
355{
356 u32 init_enet_offset;
357 u8 i;
358 int snum;
359
360 for (i = 0; i < num_entries; i++) {
361 if ((snum = qe_get_snum()) < 0) {
362 ugeth_err("fill_init_enet_entries: Can not get SNUM.");
363 return snum;
364 }
365 if ((i == 0) && skip_page_for_first_entry)
366 /* First entry of Rx does not have page */
367 init_enet_offset = 0;
368 else {
369 init_enet_offset =
370 qe_muram_alloc(thread_size, thread_alignment);
371 if (IS_MURAM_ERR(init_enet_offset)) {
372 ugeth_err
373 ("fill_init_enet_entries: Can not allocate DPRAM memory.");
374 qe_put_snum((u8) snum);
375 return -ENOMEM;
376 }
377 }
378 *(p_start++) =
379 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
380 | risc;
381 }
382
383 return 0;
384}
385
386static int return_init_enet_entries(ucc_geth_private_t *ugeth,
387 volatile u32 *p_start,
388 u8 num_entries,
389 qe_risc_allocation_e risc,
390 int skip_page_for_first_entry)
391{
392 u32 init_enet_offset;
393 u8 i;
394 int snum;
395
396 for (i = 0; i < num_entries; i++) {
397 /* Check that this entry was actually valid --
398 needed in case allocations failed */
399 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
400 snum =
401 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
402 ENET_INIT_PARAM_SNUM_SHIFT;
403 qe_put_snum((u8) snum);
404 if (!((i == 0) && skip_page_for_first_entry)) {
405 /* First entry of Rx does not have page */
406 init_enet_offset =
407 (in_be32(p_start) &
408 ENET_INIT_PARAM_PTR_MASK);
409 qe_muram_free(init_enet_offset);
410 }
411 *(p_start++) = 0; /* Just for cosmetics */
412 }
413 }
414
415 return 0;
416}
417
418#ifdef DEBUG
419static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
420 volatile u32 *p_start,
421 u8 num_entries,
422 u32 thread_size,
423 qe_risc_allocation_e risc,
424 int skip_page_for_first_entry)
425{
426 u32 init_enet_offset;
427 u8 i;
428 int snum;
429
430 for (i = 0; i < num_entries; i++) {
431 /* Check that this entry was actually valid --
432 needed in case allocations failed */
433 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
434 snum =
435 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
436 ENET_INIT_PARAM_SNUM_SHIFT;
437 qe_put_snum((u8) snum);
438 if (!((i == 0) && skip_page_for_first_entry)) {
439 /* First entry of Rx does not have page */
440 init_enet_offset =
441 (in_be32(p_start) &
442 ENET_INIT_PARAM_PTR_MASK);
443 ugeth_info("Init enet entry %d:", i);
444 ugeth_info("Base address: 0x%08x",
445 (u32)
446 qe_muram_addr(init_enet_offset));
447 mem_disp(qe_muram_addr(init_enet_offset),
448 thread_size);
449 }
450 p_start++;
451 }
452 }
453
454 return 0;
455}
456#endif
457
458#ifdef CONFIG_UGETH_FILTERING
459static enet_addr_container_t *get_enet_addr_container(void)
460{
461 enet_addr_container_t *enet_addr_cont;
462
463 /* allocate memory */
464 enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
465 if (!enet_addr_cont) {
466 ugeth_err("%s: No memory for enet_addr_container_t object.",
467 __FUNCTION__);
468 return NULL;
469 }
470
471 return enet_addr_cont;
472}
473#endif /* CONFIG_UGETH_FILTERING */
474
475static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
476{
477 kfree(enet_addr_cont);
478}
479
480#ifdef CONFIG_UGETH_FILTERING
481static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
482 enet_addr_t *p_enet_addr, u8 paddr_num)
483{
484 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
485
486 if (!(paddr_num < NUM_OF_PADDRS)) {
487 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
488 return -EINVAL;
489 }
490
491 p_82xx_addr_filt =
492 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
493 addressfiltering;
494
495 /* Ethernet frames are defined in Little Endian mode, */
496 /* therefore to insert the address we reverse the bytes. */
497 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
498 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
499 (u16) (*p_enet_addr)[4]));
500 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
501 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
502 (u16) (*p_enet_addr)[2]));
503 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
504 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
505 (u16) (*p_enet_addr)[0]));
506
507 return 0;
508}
509#endif /* CONFIG_UGETH_FILTERING */
510
511static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
512{
513 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
514
515 if (!(paddr_num < NUM_OF_PADDRS)) {
516 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
517 return -EINVAL;
518 }
519
520 p_82xx_addr_filt =
521 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
522 addressfiltering;
523
524 /* Writing address ff.ff.ff.ff.ff.ff disables address
525 recognition for this register */
526 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
527 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
528 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
529
530 return 0;
531}
532
533static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
534 enet_addr_t *p_enet_addr)
535{
536 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
537 u32 cecr_subblock;
538
539 p_82xx_addr_filt =
540 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
541 addressfiltering;
542
543 cecr_subblock =
544 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
545
546 /* Ethernet frames are defined in Little Endian mode,
547 therefore, to insert */
548 /* the address to the hash (Big Endian mode), we reverse the bytes.*/
549 out_be16(&p_82xx_addr_filt->taddr.h,
550 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
551 (u16) (*p_enet_addr)[4]));
552 out_be16(&p_82xx_addr_filt->taddr.m,
553 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
554 (u16) (*p_enet_addr)[2]));
555 out_be16(&p_82xx_addr_filt->taddr.l,
556 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
557 (u16) (*p_enet_addr)[0]));
558
559 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
560 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
561}
562
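Both the paddr and the hash setters pack the six MAC octets, which arrive least-significant byte first, into three big-endian 16-bit register halves with the byte order reversed. Stripped of the casts, the packing is just this (runnable, with a hypothetical reg[] standing in for the h/m/l halves):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
            uint16_t reg[3];   /* h, m, l register halves */

            /* h = mac[5]mac[4], m = mac[3]mac[2], l = mac[1]mac[0] */
            reg[0] = (uint16_t)(mac[5] << 8 | mac[4]);
            reg[1] = (uint16_t)(mac[3] << 8 | mac[2]);
            reg[2] = (uint16_t)(mac[1] << 8 | mac[0]);

            printf("h=%04x m=%04x l=%04x\n", reg[0], reg[1], reg[2]);
            return 0;
    }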
563#ifdef CONFIG_UGETH_MAGIC_PACKET
564static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
565{
566 ucc_fast_private_t *uccf;
567 ucc_geth_t *ug_regs;
568 u32 maccfg2, uccm;
569
570 uccf = ugeth->uccf;
571 ug_regs = ugeth->ug_regs;
572
573 /* Enable interrupts for magic packet detection */
574 uccm = in_be32(uccf->p_uccm);
575 uccm |= UCCE_MPD;
576 out_be32(uccf->p_uccm, uccm);
577
578 /* Enable magic packet detection */
579 maccfg2 = in_be32(&ug_regs->maccfg2);
580 maccfg2 |= MACCFG2_MPE;
581 out_be32(&ug_regs->maccfg2, maccfg2);
582}
583
584static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
585{
586 ucc_fast_private_t *uccf;
587 ucc_geth_t *ug_regs;
588 u32 maccfg2, uccm;
589
590 uccf = ugeth->uccf;
591 ug_regs = ugeth->ug_regs;
592
593 /* Disable interrupts for magic packet detection */
594 uccm = in_be32(uccf->p_uccm);
595 uccm &= ~UCCE_MPD;
596 out_be32(uccf->p_uccm, uccm);
597
598 /* Disable magic packet detection */
599 maccfg2 = in_be32(&ug_regs->maccfg2);
600 maccfg2 &= ~MACCFG2_MPE;
601 out_be32(&ug_regs->maccfg2, maccfg2);
602}
603#endif /* MAGIC_PACKET */
604
605static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
606{
607 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
608}
609
610#ifdef DEBUG
611static void get_statistics(ucc_geth_private_t *ugeth,
612 ucc_geth_tx_firmware_statistics_t *
613 tx_firmware_statistics,
614 ucc_geth_rx_firmware_statistics_t *
615 rx_firmware_statistics,
616 ucc_geth_hardware_statistics_t *hardware_statistics)
617{
618 ucc_fast_t *uf_regs;
619 ucc_geth_t *ug_regs;
620 ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
621 ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
622
623 ug_regs = ugeth->ug_regs;
624 uf_regs = (ucc_fast_t *) ug_regs;
625 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
626 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
627
628 /* Tx firmware only if user handed pointer and driver actually
629 gathers Tx firmware statistics */
630 if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
631 tx_firmware_statistics->sicoltx =
632 in_be32(&p_tx_fw_statistics_pram->sicoltx);
633 tx_firmware_statistics->mulcoltx =
634 in_be32(&p_tx_fw_statistics_pram->mulcoltx);
635 tx_firmware_statistics->latecoltxfr =
636 in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
637 tx_firmware_statistics->frabortduecol =
638 in_be32(&p_tx_fw_statistics_pram->frabortduecol);
639 tx_firmware_statistics->frlostinmactxer =
640 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
641 tx_firmware_statistics->carriersenseertx =
642 in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
643 tx_firmware_statistics->frtxok =
644 in_be32(&p_tx_fw_statistics_pram->frtxok);
645 tx_firmware_statistics->txfrexcessivedefer =
646 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
647 tx_firmware_statistics->txpkts256 =
648 in_be32(&p_tx_fw_statistics_pram->txpkts256);
649 tx_firmware_statistics->txpkts512 =
650 in_be32(&p_tx_fw_statistics_pram->txpkts512);
651 tx_firmware_statistics->txpkts1024 =
652 in_be32(&p_tx_fw_statistics_pram->txpkts1024);
653 tx_firmware_statistics->txpktsjumbo =
654 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
655 }
656
657 /* Rx firmware only if user handed pointer and driver actually
658 * gathers Rx firmware statistics */
659 if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
660 int i;
661 rx_firmware_statistics->frrxfcser =
662 in_be32(&p_rx_fw_statistics_pram->frrxfcser);
663 rx_firmware_statistics->fraligner =
664 in_be32(&p_rx_fw_statistics_pram->fraligner);
665 rx_firmware_statistics->inrangelenrxer =
666 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
667 rx_firmware_statistics->outrangelenrxer =
668 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
669 rx_firmware_statistics->frtoolong =
670 in_be32(&p_rx_fw_statistics_pram->frtoolong);
671 rx_firmware_statistics->runt =
672 in_be32(&p_rx_fw_statistics_pram->runt);
673 rx_firmware_statistics->verylongevent =
674 in_be32(&p_rx_fw_statistics_pram->verylongevent);
675 rx_firmware_statistics->symbolerror =
676 in_be32(&p_rx_fw_statistics_pram->symbolerror);
677 rx_firmware_statistics->dropbsy =
678 in_be32(&p_rx_fw_statistics_pram->dropbsy);
679 for (i = 0; i < 0x8; i++)
680 rx_firmware_statistics->res0[i] =
681 p_rx_fw_statistics_pram->res0[i];
682 rx_firmware_statistics->mismatchdrop =
683 in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
684 rx_firmware_statistics->underpkts =
685 in_be32(&p_rx_fw_statistics_pram->underpkts);
686 rx_firmware_statistics->pkts256 =
687 in_be32(&p_rx_fw_statistics_pram->pkts256);
688 rx_firmware_statistics->pkts512 =
689 in_be32(&p_rx_fw_statistics_pram->pkts512);
690 rx_firmware_statistics->pkts1024 =
691 in_be32(&p_rx_fw_statistics_pram->pkts1024);
692 rx_firmware_statistics->pktsjumbo =
693 in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
694 rx_firmware_statistics->frlossinmacer =
695 in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
696 rx_firmware_statistics->pausefr =
697 in_be32(&p_rx_fw_statistics_pram->pausefr);
698 for (i = 0; i < 0x4; i++)
699 rx_firmware_statistics->res1[i] =
700 p_rx_fw_statistics_pram->res1[i];
701 rx_firmware_statistics->removevlan =
702 in_be32(&p_rx_fw_statistics_pram->removevlan);
703 rx_firmware_statistics->replacevlan =
704 in_be32(&p_rx_fw_statistics_pram->replacevlan);
705 rx_firmware_statistics->insertvlan =
706 in_be32(&p_rx_fw_statistics_pram->insertvlan);
707 }
708
709 /* Hardware only if user handed pointer and driver actually
710 gathers hardware statistics */
711 if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
712 hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
713 hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
714 hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
715 hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
716 hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
717 hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
718 hardware_statistics->txok = in_be32(&ug_regs->txok);
719 hardware_statistics->txcf = in_be16(&ug_regs->txcf);
720 hardware_statistics->tmca = in_be32(&ug_regs->tmca);
721 hardware_statistics->tbca = in_be32(&ug_regs->tbca);
722 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
723 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
724 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
725 hardware_statistics->rmca = in_be32(&ug_regs->rmca);
726 hardware_statistics->rbca = in_be32(&ug_regs->rbca);
727 }
728}
729
730static void dump_bds(ucc_geth_private_t *ugeth)
731{
732 int i;
733 int length;
734
735 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
736 if (ugeth->p_tx_bd_ring[i]) {
737 length =
738 (ugeth->ug_info->bdRingLenTx[i] *
739 UCC_GETH_SIZE_OF_BD);
740 ugeth_info("TX BDs[%d]", i);
741 mem_disp(ugeth->p_tx_bd_ring[i], length);
742 }
743 }
744 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
745 if (ugeth->p_rx_bd_ring[i]) {
746 length =
747 (ugeth->ug_info->bdRingLenRx[i] *
748 UCC_GETH_SIZE_OF_BD);
749 ugeth_info("RX BDs[%d]", i);
750 mem_disp(ugeth->p_rx_bd_ring[i], length);
751 }
752 }
753}
754
755static void dump_regs(ucc_geth_private_t *ugeth)
756{
757 int i;
758
759 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
760 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
761
762 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
763 (u32) & ugeth->ug_regs->maccfg1,
764 in_be32(&ugeth->ug_regs->maccfg1));
765 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
766 (u32) & ugeth->ug_regs->maccfg2,
767 in_be32(&ugeth->ug_regs->maccfg2));
768 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
769 (u32) & ugeth->ug_regs->ipgifg,
770 in_be32(&ugeth->ug_regs->ipgifg));
771 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
772 (u32) & ugeth->ug_regs->hafdup,
773 in_be32(&ugeth->ug_regs->hafdup));
774 ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x",
775 (u32) & ugeth->ug_regs->miimng.miimcfg,
776 in_be32(&ugeth->ug_regs->miimng.miimcfg));
777 ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x",
778 (u32) & ugeth->ug_regs->miimng.miimcom,
779 in_be32(&ugeth->ug_regs->miimng.miimcom));
780 ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x",
781 (u32) & ugeth->ug_regs->miimng.miimadd,
782 in_be32(&ugeth->ug_regs->miimng.miimadd));
783 ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x",
784 (u32) & ugeth->ug_regs->miimng.miimcon,
785 in_be32(&ugeth->ug_regs->miimng.miimcon));
786 ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x",
787 (u32) & ugeth->ug_regs->miimng.miimstat,
788 in_be32(&ugeth->ug_regs->miimng.miimstat));
789 ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x",
790 (u32) & ugeth->ug_regs->miimng.miimind,
791 in_be32(&ugeth->ug_regs->miimng.miimind));
792 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
793 (u32) & ugeth->ug_regs->ifctl,
794 in_be32(&ugeth->ug_regs->ifctl));
795 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
796 (u32) & ugeth->ug_regs->ifstat,
797 in_be32(&ugeth->ug_regs->ifstat));
798 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
799 (u32) & ugeth->ug_regs->macstnaddr1,
800 in_be32(&ugeth->ug_regs->macstnaddr1));
801 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
802 (u32) & ugeth->ug_regs->macstnaddr2,
803 in_be32(&ugeth->ug_regs->macstnaddr2));
804 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
805 (u32) & ugeth->ug_regs->uempr,
806 in_be32(&ugeth->ug_regs->uempr));
807 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
808 (u32) & ugeth->ug_regs->utbipar,
809 in_be32(&ugeth->ug_regs->utbipar));
810 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
811 (u32) & ugeth->ug_regs->uescr,
812 in_be16(&ugeth->ug_regs->uescr));
813 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
814 (u32) & ugeth->ug_regs->tx64,
815 in_be32(&ugeth->ug_regs->tx64));
816 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
817 (u32) & ugeth->ug_regs->tx127,
818 in_be32(&ugeth->ug_regs->tx127));
819 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
820 (u32) & ugeth->ug_regs->tx255,
821 in_be32(&ugeth->ug_regs->tx255));
822 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
823 (u32) & ugeth->ug_regs->rx64,
824 in_be32(&ugeth->ug_regs->rx64));
825 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
826 (u32) & ugeth->ug_regs->rx127,
827 in_be32(&ugeth->ug_regs->rx127));
828 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
829 (u32) & ugeth->ug_regs->rx255,
830 in_be32(&ugeth->ug_regs->rx255));
831 ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
832 (u32) & ugeth->ug_regs->txok,
833 in_be32(&ugeth->ug_regs->txok));
834 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
835 (u32) & ugeth->ug_regs->txcf,
836 in_be16(&ugeth->ug_regs->txcf));
837 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
838 (u32) & ugeth->ug_regs->tmca,
839 in_be32(&ugeth->ug_regs->tmca));
840 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
841 (u32) & ugeth->ug_regs->tbca,
842 in_be32(&ugeth->ug_regs->tbca));
843 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
844 (u32) & ugeth->ug_regs->rxfok,
845 in_be32(&ugeth->ug_regs->rxfok));
846 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
847 (u32) & ugeth->ug_regs->rxbok,
848 in_be32(&ugeth->ug_regs->rxbok));
849 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
850 (u32) & ugeth->ug_regs->rbyt,
851 in_be32(&ugeth->ug_regs->rbyt));
852 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
853 (u32) & ugeth->ug_regs->rmca,
854 in_be32(&ugeth->ug_regs->rmca));
855 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
856 (u32) & ugeth->ug_regs->rbca,
857 in_be32(&ugeth->ug_regs->rbca));
858 ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
859 (u32) & ugeth->ug_regs->scar,
860 in_be32(&ugeth->ug_regs->scar));
861 ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
862 (u32) & ugeth->ug_regs->scam,
863 in_be32(&ugeth->ug_regs->scam));
864
865 if (ugeth->p_thread_data_tx) {
866 int numThreadsTxNumerical;
867 switch (ugeth->ug_info->numThreadsTx) {
868 case UCC_GETH_NUM_OF_THREADS_1:
869 numThreadsTxNumerical = 1;
870 break;
871 case UCC_GETH_NUM_OF_THREADS_2:
872 numThreadsTxNumerical = 2;
873 break;
874 case UCC_GETH_NUM_OF_THREADS_4:
875 numThreadsTxNumerical = 4;
876 break;
877 case UCC_GETH_NUM_OF_THREADS_6:
878 numThreadsTxNumerical = 6;
879 break;
880 case UCC_GETH_NUM_OF_THREADS_8:
881 numThreadsTxNumerical = 8;
882 break;
883 default:
884 numThreadsTxNumerical = 0;
885 break;
886 }
887
888 ugeth_info("Thread data TXs:");
889 ugeth_info("Base address: 0x%08x",
890 (u32) ugeth->p_thread_data_tx);
891 for (i = 0; i < numThreadsTxNumerical; i++) {
892 ugeth_info("Thread data TX[%d]:", i);
893 ugeth_info("Base address: 0x%08x",
894 (u32) & ugeth->p_thread_data_tx[i]);
895 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
896 sizeof(ucc_geth_thread_data_tx_t));
897 }
898 }
899 if (ugeth->p_thread_data_rx) {
900 int numThreadsRxNumerical;
901 switch (ugeth->ug_info->numThreadsRx) {
902 case UCC_GETH_NUM_OF_THREADS_1:
903 numThreadsRxNumerical = 1;
904 break;
905 case UCC_GETH_NUM_OF_THREADS_2:
906 numThreadsRxNumerical = 2;
907 break;
908 case UCC_GETH_NUM_OF_THREADS_4:
909 numThreadsRxNumerical = 4;
910 break;
911 case UCC_GETH_NUM_OF_THREADS_6:
912 numThreadsRxNumerical = 6;
913 break;
914 case UCC_GETH_NUM_OF_THREADS_8:
915 numThreadsRxNumerical = 8;
916 break;
917 default:
918 numThreadsRxNumerical = 0;
919 break;
920 }
921
922 ugeth_info("Thread data RX:");
923 ugeth_info("Base address: 0x%08x",
924 (u32) ugeth->p_thread_data_rx);
925 for (i = 0; i < numThreadsRxNumerical; i++) {
926 ugeth_info("Thread data RX[%d]:", i);
927 ugeth_info("Base address: 0x%08x",
928 (u32) & ugeth->p_thread_data_rx[i]);
929 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
930 sizeof(ucc_geth_thread_data_rx_t));
931 }
932 }
933 if (ugeth->p_exf_glbl_param) {
934 ugeth_info("EXF global param:");
935 ugeth_info("Base address: 0x%08x",
936 (u32) ugeth->p_exf_glbl_param);
937 mem_disp((u8 *) ugeth->p_exf_glbl_param,
938 sizeof(*ugeth->p_exf_glbl_param));
939 }
940 if (ugeth->p_tx_glbl_pram) {
941 ugeth_info("TX global param:");
942 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
943 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
944 (u32) & ugeth->p_tx_glbl_pram->temoder,
945 in_be16(&ugeth->p_tx_glbl_pram->temoder));
946 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
947 (u32) & ugeth->p_tx_glbl_pram->sqptr,
948 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
949 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
950 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
951 in_be32(&ugeth->p_tx_glbl_pram->
952 schedulerbasepointer));
953 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
954 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
955 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
956 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
957 (u32) & ugeth->p_tx_glbl_pram->tstate,
958 in_be32(&ugeth->p_tx_glbl_pram->tstate));
959 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
960 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
961 ugeth->p_tx_glbl_pram->iphoffset[0]);
962 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
963 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
964 ugeth->p_tx_glbl_pram->iphoffset[1]);
965 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
966 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
967 ugeth->p_tx_glbl_pram->iphoffset[2]);
968 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
969 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
970 ugeth->p_tx_glbl_pram->iphoffset[3]);
971 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
972 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
973 ugeth->p_tx_glbl_pram->iphoffset[4]);
974 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
975 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
976 ugeth->p_tx_glbl_pram->iphoffset[5]);
977 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
978 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
979 ugeth->p_tx_glbl_pram->iphoffset[6]);
980 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
981 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
982 ugeth->p_tx_glbl_pram->iphoffset[7]);
983 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
984 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
985 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
986 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
987 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
988 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
989 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
990 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
991 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
992 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
993 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
994 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
995 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
996 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
997 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
998 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
999 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
1000 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
1001 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
1002 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
1003 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
1004 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
1005 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
1006 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
1007 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
1008 (u32) & ugeth->p_tx_glbl_pram->tqptr,
1009 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
1010 }
1011 if (ugeth->p_rx_glbl_pram) {
1012 ugeth_info("RX global param:");
1013 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
1014 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
1015 (u32) & ugeth->p_rx_glbl_pram->remoder,
1016 in_be32(&ugeth->p_rx_glbl_pram->remoder));
1017 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
1018 (u32) & ugeth->p_rx_glbl_pram->rqptr,
1019 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
1020 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
1021 (u32) & ugeth->p_rx_glbl_pram->typeorlen,
1022 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
1023 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
1024 (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
1025 ugeth->p_rx_glbl_pram->rxgstpack);
1026 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
1027 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
1028 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
1029 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
1030 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
1031 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
1032 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
1033 (u32) & ugeth->p_rx_glbl_pram->rstate,
1034 ugeth->p_rx_glbl_pram->rstate);
1035 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
1036 (u32) & ugeth->p_rx_glbl_pram->mrblr,
1037 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
1038 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
1039 (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
1040 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
1041 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
1042 (u32) & ugeth->p_rx_glbl_pram->mflr,
1043 in_be16(&ugeth->p_rx_glbl_pram->mflr));
1044 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
1045 (u32) & ugeth->p_rx_glbl_pram->minflr,
1046 in_be16(&ugeth->p_rx_glbl_pram->minflr));
1047 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
1048 (u32) & ugeth->p_rx_glbl_pram->maxd1,
1049 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
1050 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
1051 (u32) & ugeth->p_rx_glbl_pram->maxd2,
1052 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
1053 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
1054 (u32) & ugeth->p_rx_glbl_pram->ecamptr,
1055 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
1056 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
1057 (u32) & ugeth->p_rx_glbl_pram->l2qt,
1058 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
1059 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
1060 (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
1061 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
1062 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
1063 (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
1064 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
1065 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
1066 (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
1067 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
1068 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
1069 (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
1070 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
1071 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
1072 (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
1073 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
1074 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
1075 (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
1076 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
1077 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
1078 (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
1079 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
1080 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
1081 (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
1082 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
1083 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
1084 (u32) & ugeth->p_rx_glbl_pram->vlantype,
1085 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
1086 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
1087 (u32) & ugeth->p_rx_glbl_pram->vlantci,
1088 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
1089 for (i = 0; i < 64; i++)
1090 ugeth_info
1091 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
1092 i,
1093 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
1094 ugeth->p_rx_glbl_pram->addressfiltering[i]);
1095 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
1096 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
1097 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
1098 }
1099 if (ugeth->p_send_q_mem_reg) {
1100 ugeth_info("Send Q memory registers:");
1101 ugeth_info("Base address: 0x%08x",
1102 (u32) ugeth->p_send_q_mem_reg);
1103 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1104 ugeth_info("SQQD[%d]:", i);
1105 ugeth_info("Base address: 0x%08x",
1106 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
1107 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
1108 sizeof(ucc_geth_send_queue_qd_t));
1109 }
1110 }
1111 if (ugeth->p_scheduler) {
1112 ugeth_info("Scheduler:");
1113 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
1114 mem_disp((u8 *) ugeth->p_scheduler,
1115 sizeof(*ugeth->p_scheduler));
1116 }
1117 if (ugeth->p_tx_fw_statistics_pram) {
1118 ugeth_info("TX FW statistics pram:");
1119 ugeth_info("Base address: 0x%08x",
1120 (u32) ugeth->p_tx_fw_statistics_pram);
1121 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
1122 sizeof(*ugeth->p_tx_fw_statistics_pram));
1123 }
1124 if (ugeth->p_rx_fw_statistics_pram) {
1125 ugeth_info("RX FW statistics pram:");
1126 ugeth_info("Base address: 0x%08x",
1127 (u32) ugeth->p_rx_fw_statistics_pram);
1128 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
1129 sizeof(*ugeth->p_rx_fw_statistics_pram));
1130 }
1131 if (ugeth->p_rx_irq_coalescing_tbl) {
1132 ugeth_info("RX IRQ coalescing tables:");
1133 ugeth_info("Base address: 0x%08x",
1134 (u32) ugeth->p_rx_irq_coalescing_tbl);
1135 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1136 ugeth_info("RX IRQ coalescing table entry[%d]:", i);
1137 ugeth_info("Base address: 0x%08x",
1138 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1139 coalescingentry[i]);
1140 ugeth_info
1141 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
1142 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1143 coalescingentry[i].interruptcoalescingmaxvalue,
1144 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1145 coalescingentry[i].
1146 interruptcoalescingmaxvalue));
1147 ugeth_info
1148 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
1149 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1150 coalescingentry[i].interruptcoalescingcounter,
1151 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1152 coalescingentry[i].
1153 interruptcoalescingcounter));
1154 }
1155 }
1156 if (ugeth->p_rx_bd_qs_tbl) {
1157 ugeth_info("RX BD QS tables:");
1158 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
1159 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1160 ugeth_info("RX BD QS table[%d]:", i);
1161 ugeth_info("Base address: 0x%08x",
1162 (u32) & ugeth->p_rx_bd_qs_tbl[i]);
1163 ugeth_info
1164 ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
1165 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
1166 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
1167 ugeth_info
1168 ("bdptr : addr - 0x%08x, val - 0x%08x",
1169 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
1170 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
1171 ugeth_info
1172 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
1173 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
1174 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
1175 externalbdbaseptr));
1176 ugeth_info
1177 ("externalbdptr : addr - 0x%08x, val - 0x%08x",
1178 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
1179 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
1180 ugeth_info("ucode RX Prefetched BDs:");
1181 ugeth_info("Base address: 0x%08x",
1182 (u32)
1183 qe_muram_addr(in_be32
1184 (&ugeth->p_rx_bd_qs_tbl[i].
1185 bdbaseptr)));
1186 mem_disp((u8 *)
1187 qe_muram_addr(in_be32
1188 (&ugeth->p_rx_bd_qs_tbl[i].
1189 bdbaseptr)),
1190 sizeof(ucc_geth_rx_prefetched_bds_t));
1191 }
1192 }
1193 if (ugeth->p_init_enet_param_shadow) {
1194 int size;
1195 ugeth_info("Init enet param shadow:");
1196 ugeth_info("Base address: 0x%08x",
1197 (u32) ugeth->p_init_enet_param_shadow);
1198 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1199 sizeof(*ugeth->p_init_enet_param_shadow));
1200
1201 size = sizeof(ucc_geth_thread_rx_pram_t);
1202 if (ugeth->ug_info->rxExtendedFiltering) {
1203 size +=
1204 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1205 if (ugeth->ug_info->largestexternallookupkeysize ==
1206 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1207 size +=
1208 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1209 if (ugeth->ug_info->largestexternallookupkeysize ==
1210 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1211 size +=
1212 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1213 }
1214
1215 dump_init_enet_entries(ugeth,
1216 &(ugeth->p_init_enet_param_shadow->
1217 txthread[0]),
1218 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1219 sizeof(ucc_geth_thread_tx_pram_t),
1220 ugeth->ug_info->riscTx, 0);
1221 dump_init_enet_entries(ugeth,
1222 &(ugeth->p_init_enet_param_shadow->
1223 rxthread[0]),
1224 ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1225 ugeth->ug_info->riscRx, 1);
1226 }
1227}
1228#endif /* DEBUG */
1229
1230static void init_default_reg_vals(volatile u32 *upsmr_register,
1231 volatile u32 *maccfg1_register,
1232 volatile u32 *maccfg2_register)
1233{
1234 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1235 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1236 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
1237}
1238
1239static int init_half_duplex_params(int alt_beb,
1240 int back_pressure_no_backoff,
1241 int no_backoff,
1242 int excess_defer,
1243 u8 alt_beb_truncation,
1244 u8 max_retransmissions,
1245 u8 collision_window,
1246 volatile u32 *hafdup_register)
1247{
1248 u32 value = 0;
1249
1250 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1251 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1252 (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1253 return -EINVAL;
1254
1255 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1256
1257 if (alt_beb)
1258 value |= HALFDUP_ALT_BEB;
1259 if (back_pressure_no_backoff)
1260 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1261 if (no_backoff)
1262 value |= HALFDUP_NO_BACKOFF;
1263 if (excess_defer)
1264 value |= HALFDUP_EXCESSIVE_DEFER;
1265
1266 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1267
1268 value |= collision_window;
1269
1270 out_be32(hafdup_register, value);
1271 return 0;
1272}
1273
1274static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1275 u8 non_btb_ipg,
1276 u8 min_ifg,
1277 u8 btb_ipg,
1278 volatile u32 *ipgifg_register)
1279{
1280 u32 value = 0;
1281
1282 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
1283 IPG part 2 */
1284 if (non_btb_cs_ipg > non_btb_ipg)
1285 return -EINVAL;
1286
1287 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1288 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1289 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1290 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1291 return -EINVAL;
1292
1293 value |=
1294 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1295 IPGIFG_NBTB_CS_IPG_MASK);
1296 value |=
1297 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1298 IPGIFG_NBTB_IPG_MASK);
1299 value |=
1300 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1301 IPGIFG_MIN_IFG_MASK);
1302 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
1303
1304 out_be32(ipgifg_register, value);
1305 return 0;
1306}
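/*
 * Worked example of the ordering rule above (the values are
 * hypothetical, chosen only for illustration): non_btb_cs_ipg = 0x40
 * with non_btb_ipg = 0x60 satisfies the first check, while swapping
 * the two arguments fails it and returns -EINVAL before IPGIFG is
 * written.
 */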
1307
1308static int init_flow_control_params(u32 automatic_flow_control_mode,
1309 int rx_flow_control_enable,
1310 int tx_flow_control_enable,
1311 u16 pause_period,
1312 u16 extension_field,
1313 volatile u32 *upsmr_register,
1314 volatile u32 *uempr_register,
1315 volatile u32 *maccfg1_register)
1316{
1317 u32 value = 0;
1318
1319 /* Set UEMPR register */
1320 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1321 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1322 out_be32(uempr_register, value);
1323
1324 /* Set UPSMR register */
1325 value = in_be32(upsmr_register);
1326 value |= automatic_flow_control_mode;
1327 out_be32(upsmr_register, value);
1328
1329 value = in_be32(maccfg1_register);
1330 if (rx_flow_control_enable)
1331 value |= MACCFG1_FLOW_RX;
1332 if (tx_flow_control_enable)
1333 value |= MACCFG1_FLOW_TX;
1334 out_be32(maccfg1_register, value);
1335
1336 return 0;
1337}
1338
1339static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1340 int auto_zero_hardware_statistics,
1341 volatile u32 *upsmr_register,
1342 volatile u16 *uescr_register)
1343{
1344 u32 upsmr_value = 0;
1345 u16 uescr_value = 0;
1346 /* Enable hardware statistics gathering if requested */
1347 if (enable_hardware_statistics) {
1348 upsmr_value = in_be32(upsmr_register);
1349 upsmr_value |= UPSMR_HSE;
1350 out_be32(upsmr_register, upsmr_value);
1351 }
1352
1353 /* Clear hardware statistics counters */
1354 uescr_value = in_be16(uescr_register);
1355 uescr_value |= UESCR_CLRCNT;
1356 /* Automatically zero hardware statistics counters on read,
1357 if requested */
1358 if (auto_zero_hardware_statistics)
1359 uescr_value |= UESCR_AUTOZ;
1360 out_be16(uescr_register, uescr_value);
1361
1362 return 0;
1363}
1364
1365static int init_firmware_statistics_gathering_mode(int
1366 enable_tx_firmware_statistics,
1367 int enable_rx_firmware_statistics,
1368 volatile u32 *tx_rmon_base_ptr,
1369 u32 tx_firmware_statistics_structure_address,
1370 volatile u32 *rx_rmon_base_ptr,
1371 u32 rx_firmware_statistics_structure_address,
1372 volatile u16 *temoder_register,
1373 volatile u32 *remoder_register)
1374{
1375 /* Note: this function does not check if */
1376 /* the parameters it receives are NULL */
1377 u16 temoder_value;
1378 u32 remoder_value;
1379
1380 if (enable_tx_firmware_statistics) {
1381 out_be32(tx_rmon_base_ptr,
1382 tx_firmware_statistics_structure_address);
1383 temoder_value = in_be16(temoder_register);
1384 temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
1385 out_be16(temoder_register, temoder_value);
1386 }
1387
1388 if (enable_rx_firmware_statistics) {
1389 out_be32(rx_rmon_base_ptr,
1390 rx_firmware_statistics_structure_address);
1391 remoder_value = in_be32(remoder_register);
1392 remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
1393 out_be32(remoder_register, remoder_value);
1394 }
1395
1396 return 0;
1397}
1398
1399static int init_mac_station_addr_regs(u8 address_byte_0,
1400 u8 address_byte_1,
1401 u8 address_byte_2,
1402 u8 address_byte_3,
1403 u8 address_byte_4,
1404 u8 address_byte_5,
1405 volatile u32 *macstnaddr1_register,
1406 volatile u32 *macstnaddr2_register)
1407{
1408 u32 value = 0;
1409
1410 /* Example: for a station address of 0x12345678ABCD, */
1411 /* 0x12 is byte 0, 0x34 is byte 1, and so on, up to 0xCD as byte 5 */
1412
1413 /* MACSTNADDR1 Register: */
1414
1415 /* 0 7 8 15 */
1416 /* station address byte 5 station address byte 4 */
1417 /* 16 23 24 31 */
1418 /* station address byte 3 station address byte 2 */
1419 value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1420 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1421 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1422 value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1423
1424 out_be32(macstnaddr1_register, value);
1425
1426 /* MACSTNADDR2 Register: */
1427
1428 /* 0 7 8 15 */
1429 /* station address byte 1 station address byte 0 */
1430 /* 16 23 24 31 */
1431 /* reserved reserved */
1432 value = 0;
1433 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1434 value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
1435
1436 out_be32(macstnaddr2_register, value);
1437
1438 return 0;
1439}
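/*
 * Worked example for the station address used in the comments above,
 * 0x12345678ABCD: bytes 2..5 are 0x56, 0x78, 0xAB and 0xCD, so
 * MACSTNADDR1 is written with 0xCDAB7856; bytes 0..1 are 0x12 and
 * 0x34, so MACSTNADDR2 is written with 0x34120000.
 */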
1440
1441static int init_mac_duplex_mode(int full_duplex,
1442 int limited_to_full_duplex,
1443 volatile u32 *maccfg2_register)
1444{
1445 u32 value = 0;
1446
1447 /* some interfaces must work in full duplex mode */
1448 if ((full_duplex == 0) && (limited_to_full_duplex == 1))
1449 return -EINVAL;
1450
1451 value = in_be32(maccfg2_register);
1452
1453 if (full_duplex)
1454 value |= MACCFG2_FDX;
1455 else
1456 value &= ~MACCFG2_FDX;
1457
1458 out_be32(maccfg2_register, value);
1459 return 0;
1460}
1461
1462static int init_check_frame_length_mode(int length_check,
1463 volatile u32 *maccfg2_register)
1464{
1465 u32 value = 0;
1466
1467 value = in_be32(maccfg2_register);
1468
1469 if (length_check)
1470 value |= MACCFG2_LC;
1471 else
1472 value &= ~MACCFG2_LC;
1473
1474 out_be32(maccfg2_register, value);
1475 return 0;
1476}
1477
1478static int init_preamble_length(u8 preamble_length,
1479 volatile u32 *maccfg2_register)
1480{
1481 u32 value = 0;
1482
1483 if ((preamble_length < 3) || (preamble_length > 7))
1484 return -EINVAL;
1485
1486 value = in_be32(maccfg2_register);
1487 value &= ~MACCFG2_PREL_MASK;
1488 value |= (preamble_length << MACCFG2_PREL_SHIFT);
1489 out_be32(maccfg2_register, value);
1490 return 0;
1491}
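/*
 * Example: the standard Ethernet preamble length of 7 bytes is the
 * largest value accepted above; 2 or 8 returns -EINVAL without
 * touching MACCFG2.
 */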
1492
1493static int init_mii_management_configuration(int reset_mgmt,
1494 int preamble_supress,
1495 volatile u32 *miimcfg_register,
1496 volatile u32 *miimind_register)
1497{
1498 unsigned int timeout = PHY_INIT_TIMEOUT;
1499 u32 value = 0;
1500
1501 value = in_be32(miimcfg_register);
1502 if (reset_mgmt) {
1503 value |= MIIMCFG_RESET_MANAGEMENT;
1504 out_be32(miimcfg_register, value);
1505 }
1506
1507 value = 0;
1508
1509 if (preamble_supress)
1510 value |= MIIMCFG_NO_PREAMBLE;
1511
1512 value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
1513 out_be32(miimcfg_register, value);
1514
1515 /* Wait until the bus is free */
1516 while ((in_be32(miimind_register) & MIIMIND_BUSY) && --timeout)
1517 cpu_relax();
1518
1519 if (!timeout) {
1520 ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
1521 return -ETIMEDOUT;
1522 }
1523
1524 return 0;
1525}
1526
1527static int init_rx_parameters(int reject_broadcast,
1528 int receive_short_frames,
1529 int promiscuous, volatile u32 *upsmr_register)
1530{
1531 u32 value = 0;
1532
1533 value = in_be32(upsmr_register);
1534
1535 if (reject_broadcast)
1536 value |= UPSMR_BRO;
1537 else
1538 value &= ~UPSMR_BRO;
1539
1540 if (receive_short_frames)
1541 value |= UPSMR_RSH;
1542 else
1543 value &= ~UPSMR_RSH;
1544
1545 if (promiscuous)
1546 value |= UPSMR_PRO;
1547 else
1548 value &= ~UPSMR_PRO;
1549
1550 out_be32(upsmr_register, value);
1551
1552 return 0;
1553}
1554
1555static int init_max_rx_buff_len(u16 max_rx_buf_len,
1556 volatile u16 *mrblr_register)
1557{
1558 /* max_rx_buf_len value must be a multiple of 128 */
1559 if ((max_rx_buf_len == 0)
1560 || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
1561 return -EINVAL;
1562
1563 out_be16(mrblr_register, max_rx_buf_len);
1564 return 0;
1565}
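/*
 * Example: 1536 (12 * 128) is a valid max_rx_buf_len, while 1518 is
 * rejected because it is not a multiple of UCC_GETH_MRBLR_ALIGNMENT
 * (128); 0 is rejected outright.
 */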
1566
1567static int init_min_frame_len(u16 min_frame_length,
1568 volatile u16 *minflr_register,
1569 volatile u16 *mrblr_register)
1570{
1571 u16 mrblr_value = 0;
1572
1573 mrblr_value = in_be16(mrblr_register);
1574 if (min_frame_length >= (mrblr_value - 4))
1575 return -EINVAL;
1576
1577 out_be16(minflr_register, min_frame_length);
1578 return 0;
1579}
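/*
 * Example: with mrblr programmed to 1536, any min_frame_length of
 * 1532 (1536 - 4) or more is rejected, so 1531 is the largest value
 * accepted here.
 */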
1580
1581static int adjust_enet_interface(ucc_geth_private_t *ugeth)
1582{
1583 ucc_geth_info_t *ug_info;
1584 ucc_geth_t *ug_regs;
1585 ucc_fast_t *uf_regs;
1586 enet_speed_e speed;
1587 int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
1588 0, limited_to_full_duplex = 0;
1589 u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
1590 u16 value;
1591
1592 ugeth_vdbg("%s: IN", __FUNCTION__);
1593
1594 ug_info = ugeth->ug_info;
1595 ug_regs = ugeth->ug_regs;
1596 uf_regs = ugeth->uccf->uf_regs;
1597
1598 /* Analyze enet_interface according to Interface Mode Configuration
1599 table */
1600 ret_val =
1601 get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
1602 &rpm, &tbi, &limited_to_full_duplex);
1603 if (ret_val != 0) {
1604 ugeth_err
1605 ("%s: half duplex not supported in requested configuration.",
1606 __FUNCTION__);
1607 return ret_val;
1608 }
1609
1610 /* Set MACCFG2 */
1611 maccfg2 = in_be32(&ug_regs->maccfg2);
1612 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1613 if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
1614 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1615 else if (speed == ENET_SPEED_1000BT)
1616 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1617 maccfg2 |= ug_info->padAndCrc;
1618 out_be32(&ug_regs->maccfg2, maccfg2);
1619
1620 /* Set UPSMR */
1621 upsmr = in_be32(&uf_regs->upsmr);
1622 upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
1623 if (rpm)
1624 upsmr |= UPSMR_RPM;
1625 if (r10m)
1626 upsmr |= UPSMR_R10M;
1627 if (tbi)
1628 upsmr |= UPSMR_TBIM;
1629 if (rmm)
1630 upsmr |= UPSMR_RMM;
1631 out_be32(&uf_regs->upsmr, upsmr);
1632
1633 /* Set UTBIPAR */
1634 utbipar = in_be32(&ug_regs->utbipar);
1635 utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
1636 if (tbi)
1637 utbipar |=
1638 (ug_info->phy_address +
1639 ugeth->ug_info->uf_info.
1640 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1641 else
1642 utbipar |=
1643 (0x10 +
1644 ugeth->ug_info->uf_info.
1645 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1646 out_be32(&ug_regs->utbipar, utbipar);
1647
1648 /* Disable autonegotiation in tbi mode, because by default it
1649 comes up in autonegotiation mode. */
1650 /* Note that this depends on proper setting in utbipar register. */
1651 if (tbi) {
1652 tbiBaseAddress = in_be32(&ug_regs->utbipar);
1653 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
1654 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
1655 value =
1656 ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
1657 ENET_TBI_MII_CR);
1658 value &= ~0x1000; /* Turn off autonegotiation (BMCR_ANENABLE) */
1659 ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
1660 ENET_TBI_MII_CR, value);
1661 }
1662
1663 ret_val = init_mac_duplex_mode(1,
1664 limited_to_full_duplex,
1665 &ug_regs->maccfg2);
1666 if (ret_val != 0) {
1667 ugeth_err
1668 ("%s: half duplex not supported in requested configuration.",
1669 __FUNCTION__);
1670 return ret_val;
1671 }
1672
1673 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1674
1675 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1676 if (ret_val != 0) {
1677 ugeth_err
1678 ("%s: Preamble length must be between 3 and 7 inclusive.",
1679 __FUNCTION__);
1680 return ret_val;
1681 }
1682
1683 return 0;
1684}
1685
1686/* Called every time the controller might need to be made
1687 * aware of new link state. The PHY code conveys this
1688 * information through variables in the ugeth structure, and this
1689 * function converts those variables into the appropriate
1690 * register values, and can bring down the device if needed.
1691 */
1692static void adjust_link(struct net_device *dev)
1693{
1694 ucc_geth_private_t *ugeth = netdev_priv(dev);
1695 ucc_geth_t *ug_regs;
1696 u32 tempval;
1697 struct ugeth_mii_info *mii_info = ugeth->mii_info;
1698
1699 ug_regs = ugeth->ug_regs;
1700
1701 if (mii_info->link) {
1702 /* Now we make sure that we can be in full duplex mode.
1703 * If not, we operate in half-duplex mode. */
1704 if (mii_info->duplex != ugeth->oldduplex) {
1705 if (!(mii_info->duplex)) {
1706 tempval = in_be32(&ug_regs->maccfg2);
1707 tempval &= ~(MACCFG2_FDX);
1708 out_be32(&ug_regs->maccfg2, tempval);
1709
1710 ugeth_info("%s: Half Duplex", dev->name);
1711 } else {
1712 tempval = in_be32(&ug_regs->maccfg2);
1713 tempval |= MACCFG2_FDX;
1714 out_be32(&ug_regs->maccfg2, tempval);
1715
1716 ugeth_info("%s: Full Duplex", dev->name);
1717 }
1718
1719 ugeth->oldduplex = mii_info->duplex;
1720 }
1721
1722 if (mii_info->speed != ugeth->oldspeed) {
1723 switch (mii_info->speed) {
1724 case 1000:
1725#ifdef CONFIG_MPC836x
1726/* FIXME: This code works around a 100Mbps bug;
1727remove it when the bug is fixed!!! */
1728 if (ugeth->ug_info->enet_interface ==
1729 ENET_1000_GMII)
1730 /* Run the commands which initialize the PHY */
1731 {
1732 tempval =
1733 (u32) mii_info->mdio_read(ugeth->
1734 dev, mii_info->mii_id, 0x1b);
1735 tempval |= 0x000f;
1736 mii_info->mdio_write(ugeth->dev,
1737 mii_info->mii_id, 0x1b,
1738 (u16) tempval);
1739 tempval =
1740 (u32) mii_info->mdio_read(ugeth->
1741 dev, mii_info->mii_id,
1742 MII_BMCR);
1743 mii_info->mdio_write(ugeth->dev,
1744 mii_info->mii_id, MII_BMCR,
1745 (u16) (tempval | BMCR_RESET));
1746 } else if (ugeth->ug_info->enet_interface ==
1747 ENET_1000_RGMII)
1748 /* Run the commands which initialize the PHY */
1749 {
1750 tempval =
1751 (u32) mii_info->mdio_read(ugeth->
1752 dev, mii_info->mii_id, 0x1b);
1753 tempval = (tempval & ~0x000f) | 0x000b;
1754 mii_info->mdio_write(ugeth->dev,
1755 mii_info->mii_id, 0x1b,
1756 (u16) tempval);
1757 tempval =
1758 (u32) mii_info->mdio_read(ugeth->
1759 dev, mii_info->mii_id,
1760 MII_BMCR);
1761 mii_info->mdio_write(ugeth->dev,
1762 mii_info->mii_id, MII_BMCR,
1763 (u16) (tempval | BMCR_RESET));
1764 }
1765 msleep(4000);
1766#endif /* CONFIG_MPC836x */
1767 adjust_enet_interface(ugeth);
1768 break;
1769 case 100:
1770 case 10:
1771#ifdef CONFIG_MPC836x
1772/* FIXME: This code works around a 100Mbps bug;
1773remove these lines when it is fixed!!! */
1774 ugeth->ug_info->enet_interface = ENET_100_RGMII;
1775 tempval =
1776 (u32) mii_info->mdio_read(ugeth->dev,
1777 mii_info->mii_id,
1778 0x1b);
1779 tempval = (tempval & ~0x000f) | 0x000b;
1780 mii_info->mdio_write(ugeth->dev,
1781 mii_info->mii_id, 0x1b,
1782 (u16) tempval);
1783 tempval =
1784 (u32) mii_info->mdio_read(ugeth->dev,
1785 mii_info->mii_id,
1786 MII_BMCR);
1787 mii_info->mdio_write(ugeth->dev,
1788 mii_info->mii_id, MII_BMCR,
1789 (u16) (tempval |
1790 BMCR_RESET));
1791 msleep(4000);
1792#endif /* CONFIG_MPC836x */
1793 adjust_enet_interface(ugeth);
1794 break;
1795 default:
1796 ugeth_warn
1797 ("%s: Ack! Speed (%d) is not 10/100/1000!",
1798 dev->name, mii_info->speed);
1799 break;
1800 }
1801
1802 ugeth_info("%s: Speed %dBT", dev->name,
1803 mii_info->speed);
1804
1805 ugeth->oldspeed = mii_info->speed;
1806 }
1807
1808 if (!ugeth->oldlink) {
1809 ugeth_info("%s: Link is up", dev->name);
1810 ugeth->oldlink = 1;
1811 netif_carrier_on(dev);
1812 netif_schedule(dev);
1813 }
1814 } else {
1815 if (ugeth->oldlink) {
1816 ugeth_info("%s: Link is down", dev->name);
1817 ugeth->oldlink = 0;
1818 ugeth->oldspeed = 0;
1819 ugeth->oldduplex = -1;
1820 netif_carrier_off(dev);
1821 }
1822 }
1823}
1824
1825/* Configure the PHY for dev.
1826 * Returns 0 on success, -1 on failure.
1827 */
1828static int init_phy(struct net_device *dev)
1829{
1830 ucc_geth_private_t *ugeth = netdev_priv(dev);
1831 struct phy_info *curphy;
1832 ucc_mii_mng_t *mii_regs;
1833 struct ugeth_mii_info *mii_info;
1834 int err;
1835
1836 mii_regs = &ugeth->ug_regs->miimng;
1837
1838 ugeth->oldlink = 0;
1839 ugeth->oldspeed = 0;
1840 ugeth->oldduplex = -1;
1841
1842 mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);
1843
1844 if (NULL == mii_info) {
1845 ugeth_err("%s: Could not allocate mii_info", dev->name);
1846 return -ENOMEM;
1847 }
1848
1849 mii_info->mii_regs = mii_regs;
1850 mii_info->speed = SPEED_1000;
1851 mii_info->duplex = DUPLEX_FULL;
1852 mii_info->pause = 0;
1853 mii_info->link = 0;
1854
1855 mii_info->advertising = (ADVERTISED_10baseT_Half |
1856 ADVERTISED_10baseT_Full |
1857 ADVERTISED_100baseT_Half |
1858 ADVERTISED_100baseT_Full |
1859 ADVERTISED_1000baseT_Full);
1860 mii_info->autoneg = 1;
1861
1862 mii_info->mii_id = ugeth->ug_info->phy_address;
1863
1864 mii_info->dev = dev;
1865
1866 mii_info->mdio_read = &read_phy_reg;
1867 mii_info->mdio_write = &write_phy_reg;
1868
1869 ugeth->mii_info = mii_info;
1870
1871 spin_lock_irq(&ugeth->lock);
1872
1873 /* Set this UCC to be the master of the MII management */
1874 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
1875
1876 if (init_mii_management_configuration(1,
1877 ugeth->ug_info->
1878 miiPreambleSupress,
1879 &mii_regs->miimcfg,
1880 &mii_regs->miimind)) {
1881 ugeth_err("%s: The MII Bus is stuck!", dev->name);
1882 err = -1;
1883 goto bus_fail;
1884 }
1885
1886 spin_unlock_irq(&ugeth->lock);
1887
1888 /* get info for this PHY */
1889 curphy = get_phy_info(ugeth->mii_info);
1890
1891 if (curphy == NULL) {
1892 ugeth_err("%s: No PHY found", dev->name);
1893 err = -1;
1894 goto no_phy;
1895 }
1896
1897 mii_info->phyinfo = curphy;
1898
1899 /* Run the commands which initialize the PHY */
1900 if (curphy->init) {
1901 err = curphy->init(ugeth->mii_info);
1902 if (err)
1903 goto phy_init_fail;
1904 }
1905
1906 return 0;
1907
1908 phy_init_fail:
1909 no_phy:
1910 bus_fail:
1911 kfree(mii_info);
1912
1913 return err;
1914}
1915
1916#ifdef CONFIG_UGETH_TX_ON_DEMAND
1917static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
1918{
1919 ucc_fast_transmit_on_demand(ugeth->uccf);
1920
1921 return 0;
1922}
1923#endif
1924
1925static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
1926{
1927 ucc_fast_private_t *uccf;
1928 u32 cecr_subblock;
1929 u32 temp;
1930
1931 uccf = ugeth->uccf;
1932
1933 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1934 temp = in_be32(uccf->p_uccm);
1935 temp &= ~UCCE_GRA;
1936 out_be32(uccf->p_uccm, temp);
1937 out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
1938
1939 /* Issue host command */
1940 cecr_subblock =
1941 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1942 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1943 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1944
1945 /* Wait for command to complete */
1946 do {
1947 temp = in_be32(uccf->p_ucce);
1948 } while (!(temp & UCCE_GRA));
1949
1950 uccf->stopped_tx = 1;
1951
1952 return 0;
1953}
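/*
 * The UCCE_GRA poll above spins without a bound.  A bounded variant
 * (an illustrative sketch only; the iteration limit is a hypothetical
 * choice, not a documented value) could look like:
 *
 *	unsigned int limit = 100000;
 *
 *	while (!(in_be32(uccf->p_ucce) & UCCE_GRA) && --limit)
 *		cpu_relax();
 *	if (!limit)
 *		ugeth_err("%s: graceful stop TX timed out", __FUNCTION__);
 */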
1954
1955static int ugeth_graceful_stop_rx(ucc_geth_private_t *ugeth)
1956{
1957 ucc_fast_private_t *uccf;
1958 u32 cecr_subblock;
1959 u8 temp;
1960
1961 uccf = ugeth->uccf;
1962
1963 /* Clear acknowledge bit */
1964 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1965 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1966 ugeth->p_rx_glbl_pram->rxgstpack = temp;
1967
1968 /* Keep issuing command and checking acknowledge bit until
1969 it is asserted, according to spec */
1970 do {
1971 /* Issue host command */
1972 cecr_subblock =
1973 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1974 ucc_num);
1975 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1976 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1977
1978 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1979 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1980
1981 uccf->stopped_rx = 1;
1982
1983 return 0;
1984}
1985
1986static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
1987{
1988 ucc_fast_private_t *uccf;
1989 u32 cecr_subblock;
1990
1991 uccf = ugeth->uccf;
1992
1993 cecr_subblock =
1994 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1995 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
1996 0);
1997 uccf->stopped_tx = 0;
1998
1999 return 0;
2000}
2001
2002static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
2003{
2004 ucc_fast_private_t *uccf;
2005 u32 cecr_subblock;
2006
2007 uccf = ugeth->uccf;
2008
2009 cecr_subblock =
2010 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
2011 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
2012 0);
2013 uccf->stopped_rx = 0;
2014
2015 return 0;
2016}
2017
2018static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
2019{
2020 ucc_fast_private_t *uccf;
2021 int enabled_tx, enabled_rx;
2022
2023 uccf = ugeth->uccf;
2024
2025 /* check if the UCC number is in range. */
2026 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2027 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2028 return -EINVAL;
2029 }
2030
2031 enabled_tx = uccf->enabled_tx;
2032 enabled_rx = uccf->enabled_rx;
2033
2034 /* Get Tx and Rx going again, in case this channel was actively
2035 disabled. */
2036 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
2037 ugeth_restart_tx(ugeth);
2038 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
2039 ugeth_restart_rx(ugeth);
2040
2041 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
2042
2043 return 0;
2044
2045}
2046
2047static int ugeth_disable(ucc_geth_private_t *ugeth, comm_dir_e mode)
2048{
2049 ucc_fast_private_t *uccf;
2050
2051 uccf = ugeth->uccf;
2052
2053 /* check if the UCC number is in range. */
2054 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2055 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2056 return -EINVAL;
2057 }
2058
2059 /* Stop any transmissions */
2060 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
2061 ugeth_graceful_stop_tx(ugeth);
2062
2063 /* Stop any receptions */
2064 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
2065 ugeth_graceful_stop_rx(ugeth);
2066
2067 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
2068
2069 return 0;
2070}
2071
2072static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
2073{
2074#ifdef DEBUG
2075 ucc_fast_dump_regs(ugeth->uccf);
2076 dump_regs(ugeth);
2077 dump_bds(ugeth);
2078#endif
2079}
2080
2081#ifdef CONFIG_UGETH_FILTERING
2082static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
2083 p_UccGethTadParams,
2084 qe_fltr_tad_t *qe_fltr_tad)
2085{
2086 u16 temp;
2087
2088 /* Zero serialized TAD */
2089 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
2090
2091 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
2092 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
2093 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2094 || (p_UccGethTadParams->vnontag_op !=
2095 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
2096 )
2097 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
2098 if (p_UccGethTadParams->reject_frame)
2099 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
2100 temp =
2101 (u16) (((u16) p_UccGethTadParams->
2102 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
2103 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
2104
2105 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
2106 if (p_UccGethTadParams->vnontag_op ==
2107 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
2108 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
2109 qe_fltr_tad->serialized[1] |=
2110 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
2111
2112 qe_fltr_tad->serialized[2] |=
2113 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
2114 /* upper bits */
2115 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
2116 /* lower bits */
2117 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
2118
2119 return 0;
2120}
2121
2122static enet_addr_container_t
2123 *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
2124 enet_addr_t *p_enet_addr)
2125{
2126 enet_addr_container_t *enet_addr_cont;
2127 struct list_head *p_lh;
2128 u16 i, num;
2129 int32_t j;
2130 u8 *p_counter;
2131
2132 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2133 p_lh = &ugeth->group_hash_q;
2134 p_counter = &(ugeth->numGroupAddrInHash);
2135 } else {
2136 p_lh = &ugeth->ind_hash_q;
2137 p_counter = &(ugeth->numIndAddrInHash);
2138 }
2139
2140 if (!p_lh)
2141 return NULL;
2142
2143 num = *p_counter;
2144
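 /* Rotate through the circular queue: each container is dequeued,
  * compared octet by octet from the last byte down, and re-enqueued
  * on a mismatch so the queue ordering survives the scan.  A match
  * is returned while still detached from the queue; the caller
  * either re-enqueues it or destroys it.
  */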
2145 for (i = 0; i < num; i++) {
2146 enet_addr_cont =
2147 (enet_addr_container_t *)
2148 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2149 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
2150 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
2151 break;
2152 if (j == 0)
2153 return enet_addr_cont; /* Found */
2154 }
2155 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2156 }
2157 return NULL;
2158}
2159
2160static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
2161 enet_addr_t *p_enet_addr)
2162{
2163 ucc_geth_enet_address_recognition_location_e location;
2164 enet_addr_container_t *enet_addr_cont;
2165 struct list_head *p_lh;
2166 u8 i;
2167 u32 limit;
2168 u8 *p_counter;
2169
2170 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2171 p_lh = &ugeth->group_hash_q;
2172 limit = ugeth->ug_info->maxGroupAddrInHash;
2173 location =
2174 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
2175 p_counter = &(ugeth->numGroupAddrInHash);
2176 } else {
2177 p_lh = &ugeth->ind_hash_q;
2178 limit = ugeth->ug_info->maxIndAddrInHash;
2179 location =
2180 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
2181 p_counter = &(ugeth->numIndAddrInHash);
2182 }
2183
2184 if ((enet_addr_cont =
2185 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
2186 list_add(p_lh, &enet_addr_cont->node); /* Put it back */
2187 return 0;
2188 }
2189 if ((!p_lh) || (!(*p_counter < limit)))
2190 return -EBUSY;
2191 if (!(enet_addr_cont = get_enet_addr_container()))
2192 return -ENOMEM;
2193 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2194 (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
2195 enet_addr_cont->location = location;
2196 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2197 ++(*p_counter);
2198
2199 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2200
2201 return 0;
2202}
2203
2204static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
2205 enet_addr_t *p_enet_addr)
2206{
2207 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2208 enet_addr_container_t *enet_addr_cont;
2209 ucc_fast_private_t *uccf;
2210 comm_dir_e comm_dir;
2211 u16 i, num;
2212 struct list_head *p_lh;
2213 u32 *addr_h, *addr_l;
2214 u8 *p_counter;
2215
2216 uccf = ugeth->uccf;
2217
2218 p_82xx_addr_filt =
2219 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2220 addressfiltering;
2221
2222 enet_addr_cont =
2223     ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr);
2224 if (!enet_addr_cont)
2225 	return -ENOENT;
2226
2227 /* It's been found and removed from the CQ. */
2228 /* Now destroy its container */
2229 put_enet_addr_container(enet_addr_cont);
2230
2231 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2232 addr_h = &(p_82xx_addr_filt->gaddr_h);
2233 addr_l = &(p_82xx_addr_filt->gaddr_l);
2234 p_lh = &ugeth->group_hash_q;
2235 p_counter = &(ugeth->numGroupAddrInHash);
2236 } else {
2237 addr_h = &(p_82xx_addr_filt->iaddr_h);
2238 addr_l = &(p_82xx_addr_filt->iaddr_l);
2239 p_lh = &ugeth->ind_hash_q;
2240 p_counter = &(ugeth->numIndAddrInHash);
2241 }
2242
2243 comm_dir = 0;
2244 if (uccf->enabled_tx)
2245 comm_dir |= COMM_DIR_TX;
2246 if (uccf->enabled_rx)
2247 comm_dir |= COMM_DIR_RX;
2248 if (comm_dir)
2249 ugeth_disable(ugeth, comm_dir);
2250
2251 /* Clear the hash table. */
2252 out_be32(addr_h, 0x00000000);
2253 out_be32(addr_l, 0x00000000);
2254
2255 /* Add all remaining CQ elements back into hash */
2256 num = --(*p_counter);
2257 for (i = 0; i < num; i++) {
2258 enet_addr_cont =
2259 (enet_addr_container_t *)
2260 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2261 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2262 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2263 }
2264
2265 if (comm_dir)
2266 ugeth_enable(ugeth, comm_dir);
2267
2268 return 0;
2269}
2270#endif /* CONFIG_UGETH_FILTERING */
2271
2272static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
2273 ugeth,
2274 enet_addr_type_e
2275 enet_addr_type)
2276{
2277 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2278 ucc_fast_private_t *uccf;
2279 comm_dir_e comm_dir;
2280 struct list_head *p_lh;
2281 u16 i, num;
2282 u32 *addr_h, *addr_l;
2283 u8 *p_counter;
2284
2285 uccf = ugeth->uccf;
2286
2287 p_82xx_addr_filt =
2288 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2289 addressfiltering;
2290
2291 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2292 addr_h = &(p_82xx_addr_filt->gaddr_h);
2293 addr_l = &(p_82xx_addr_filt->gaddr_l);
2294 p_lh = &ugeth->group_hash_q;
2295 p_counter = &(ugeth->numGroupAddrInHash);
2296 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
2297 addr_h = &(p_82xx_addr_filt->iaddr_h);
2298 addr_l = &(p_82xx_addr_filt->iaddr_l);
2299 p_lh = &ugeth->ind_hash_q;
2300 p_counter = &(ugeth->numIndAddrInHash);
2301 } else
2302 return -EINVAL;
2303
2304 comm_dir = 0;
2305 if (uccf->enabled_tx)
2306 comm_dir |= COMM_DIR_TX;
2307 if (uccf->enabled_rx)
2308 comm_dir |= COMM_DIR_RX;
2309 if (comm_dir)
2310 ugeth_disable(ugeth, comm_dir);
2311
2312 /* Clear the hash table. */
2313 out_be32(addr_h, 0x00000000);
2314 out_be32(addr_l, 0x00000000);
2315
2316 if (!p_lh)
2317 return 0;
2318
2319 num = *p_counter;
2320
2321 /* Delete all remaining CQ elements */
2322 for (i = 0; i < num; i++)
2323 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2324
2325 *p_counter = 0;
2326
2327 if (comm_dir)
2328 ugeth_enable(ugeth, comm_dir);
2329
2330 return 0;
2331}
2332
2333#ifdef CONFIG_UGETH_FILTERING
2334static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
2335 enet_addr_t *p_enet_addr,
2336 u8 paddr_num)
2337{
2338 int i;
2339
2340 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2341 ugeth_warn
2342 ("%s: multicast address added to paddr will have no "
2343 "effect - is this what you wanted?",
2344 __FUNCTION__);
2345
2346 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2347 /* store address in our database */
2348 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2349 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2350 /* put in hardware */
2351 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2352}
2353#endif /* CONFIG_UGETH_FILTERING */
2354
2355static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
2356 u8 paddr_num)
2357{
2358 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2359 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
2360}
2361
2362static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
2363{
2364 u16 i, j;
2365 u8 *bd;
2366
2367 if (!ugeth)
2368 return;
2369
2370 if (ugeth->uccf)
2371 ucc_fast_free(ugeth->uccf);
2372
2373 if (ugeth->p_thread_data_tx) {
2374 qe_muram_free(ugeth->thread_dat_tx_offset);
2375 ugeth->p_thread_data_tx = NULL;
2376 }
2377 if (ugeth->p_thread_data_rx) {
2378 qe_muram_free(ugeth->thread_dat_rx_offset);
2379 ugeth->p_thread_data_rx = NULL;
2380 }
2381 if (ugeth->p_exf_glbl_param) {
2382 qe_muram_free(ugeth->exf_glbl_param_offset);
2383 ugeth->p_exf_glbl_param = NULL;
2384 }
2385 if (ugeth->p_rx_glbl_pram) {
2386 qe_muram_free(ugeth->rx_glbl_pram_offset);
2387 ugeth->p_rx_glbl_pram = NULL;
2388 }
2389 if (ugeth->p_tx_glbl_pram) {
2390 qe_muram_free(ugeth->tx_glbl_pram_offset);
2391 ugeth->p_tx_glbl_pram = NULL;
2392 }
2393 if (ugeth->p_send_q_mem_reg) {
2394 qe_muram_free(ugeth->send_q_mem_reg_offset);
2395 ugeth->p_send_q_mem_reg = NULL;
2396 }
2397 if (ugeth->p_scheduler) {
2398 qe_muram_free(ugeth->scheduler_offset);
2399 ugeth->p_scheduler = NULL;
2400 }
2401 if (ugeth->p_tx_fw_statistics_pram) {
2402 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2403 ugeth->p_tx_fw_statistics_pram = NULL;
2404 }
2405 if (ugeth->p_rx_fw_statistics_pram) {
2406 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2407 ugeth->p_rx_fw_statistics_pram = NULL;
2408 }
2409 if (ugeth->p_rx_irq_coalescing_tbl) {
2410 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2411 ugeth->p_rx_irq_coalescing_tbl = NULL;
2412 }
2413 if (ugeth->p_rx_bd_qs_tbl) {
2414 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2415 ugeth->p_rx_bd_qs_tbl = NULL;
2416 }
2417 if (ugeth->p_init_enet_param_shadow) {
2418 return_init_enet_entries(ugeth,
2419 &(ugeth->p_init_enet_param_shadow->
2420 rxthread[0]),
2421 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2422 ugeth->ug_info->riscRx, 1);
2423 return_init_enet_entries(ugeth,
2424 &(ugeth->p_init_enet_param_shadow->
2425 txthread[0]),
2426 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2427 ugeth->ug_info->riscTx, 0);
2428 kfree(ugeth->p_init_enet_param_shadow);
2429 ugeth->p_init_enet_param_shadow = NULL;
2430 }
2431 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2432 bd = ugeth->p_tx_bd_ring[i];
2433 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2434 if (ugeth->tx_skbuff[i][j]) {
2435 dma_unmap_single(NULL,
2436 BD_BUFFER_ARG(bd),
2437 (BD_STATUS_AND_LENGTH(bd) &
2438 BD_LENGTH_MASK),
2439 DMA_TO_DEVICE);
2440 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2441 ugeth->tx_skbuff[i][j] = NULL;
2442 }
 bd += UCC_GETH_SIZE_OF_BD;
2443 }
2444
2445 kfree(ugeth->tx_skbuff[i]);
2446
2447 if (ugeth->p_tx_bd_ring[i]) {
2448 if (ugeth->ug_info->uf_info.bd_mem_part ==
2449 MEM_PART_SYSTEM)
2450 kfree((void *)ugeth->tx_bd_ring_offset[i]);
2451 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2452 MEM_PART_MURAM)
2453 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2454 ugeth->p_tx_bd_ring[i] = NULL;
2455 }
2456 }
2457 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2458 if (ugeth->p_rx_bd_ring[i]) {
2459 /* Return existing data buffers in ring */
2460 bd = ugeth->p_rx_bd_ring[i];
2461 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2462 if (ugeth->rx_skbuff[i][j]) {
2463 dma_unmap_single(NULL, BD_BUFFER(bd),
2464 ugeth->ug_info->
2465 uf_info.
2466 max_rx_buf_length +
2467 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2468 DMA_FROM_DEVICE);
2469
2470 dev_kfree_skb_any(ugeth->
2471 rx_skbuff[i][j]);
2472 ugeth->rx_skbuff[i][j] = NULL;
2473 }
2474 bd += UCC_GETH_SIZE_OF_BD;
2475 }
2476
2477 kfree(ugeth->rx_skbuff[i]);
2478
2479 if (ugeth->ug_info->uf_info.bd_mem_part ==
2480 MEM_PART_SYSTEM)
2481 kfree((void *)ugeth->rx_bd_ring_offset[i]);
2482 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2483 MEM_PART_MURAM)
2484 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2485 ugeth->p_rx_bd_ring[i] = NULL;
2486 }
2487 }
2488 while (!list_empty(&ugeth->group_hash_q))
2489 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2490 (dequeue(&ugeth->group_hash_q)));
2491 while (!list_empty(&ugeth->ind_hash_q))
2492 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2493 (dequeue(&ugeth->ind_hash_q)));
2494
2495}
2496
2497static void ucc_geth_set_multi(struct net_device *dev)
2498{
2499 ucc_geth_private_t *ugeth;
2500 struct dev_mc_list *dmi;
2501 ucc_fast_t *uf_regs;
2502 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2503 enet_addr_t tempaddr;
2504 u8 *mcptr, *tdptr;
2505 int i, j;
2506
2507 ugeth = netdev_priv(dev);
2508
2509 uf_regs = ugeth->uccf->uf_regs;
2510
2511 if (dev->flags & IFF_PROMISC) {
2512
2513 uf_regs->upsmr |= UPSMR_PRO;
2514
2515 } else {
2516
2517 uf_regs->upsmr &= ~UPSMR_PRO;
2518
2519 p_82xx_addr_filt =
2520 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
2521 p_rx_glbl_pram->addressfiltering;
2522
2523 if (dev->flags & IFF_ALLMULTI) {
2524 /* Catch all multicast addresses, so set the
2525 * filter to all 1's.
2526 */
2527 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2528 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2529 } else {
2530 /* Clear filter and add the addresses in the list.
2531 */
2532 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2533 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2534
2535 dmi = dev->mc_list;
2536
2537 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2538
2539 /* Only support group multicast for now.
2540 */
2541 if (!(dmi->dmi_addr[0] & 1))
2542 continue;
2543
2544 /* The address in dmi_addr is LSB first,
2545 * and tempaddr is MSB first. We have to
2546 * copy bytes MSB first from dmi_addr.
2547 */
2548 mcptr = (u8 *) dmi->dmi_addr + 5;
2549 tdptr = (u8 *) & tempaddr;
2550 for (j = 0; j < 6; j++)
2551 *tdptr++ = *mcptr--;
2552
2553 /* Ask CPM to run CRC and set bit in
2554 * filter mask.
2555 */
2556 hw_add_addr_in_hash(ugeth, &tempaddr);
2557
2558 }
2559 }
2560 }
2561}
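/*
 * Example of the byte swap done above, using an illustrative group
 * address: 01:00:5e:00:00:01 arrives in dmi_addr as the byte sequence
 * 01 00 5e 00 00 01 and is copied into tempaddr reversed, as
 * 01 00 00 5e 00 01, before being hashed.
 */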
2562
2563static void ucc_geth_stop(ucc_geth_private_t *ugeth)
2564{
2565 ucc_geth_t *ug_regs = ugeth->ug_regs;
2566 u32 tempval;
2567
2568 ugeth_vdbg("%s: IN", __FUNCTION__);
2569
2570 /* Disable the controller */
2571 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2572
2573 /* Tell the kernel the link is down */
2574 ugeth->mii_info->link = 0;
2575 adjust_link(ugeth->dev);
2576
2577 /* Mask all interrupts */
2578 out_be32(ugeth->uccf->p_uccm, 0x00000000);
2579
2580 /* Clear all interrupts */
2581 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2582
2583 /* Disable Rx and Tx */
2584 tempval = in_be32(&ug_regs->maccfg1);
2585 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2586 out_be32(&ug_regs->maccfg1, tempval);
2587
2588 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2589 /* Clear any pending interrupts */
2590 mii_clear_phy_interrupt(ugeth->mii_info);
2591
2592 /* Disable PHY Interrupts */
2593 mii_configure_phy_interrupt(ugeth->mii_info,
2594 MII_INTERRUPT_DISABLED);
2595 }
2596
2597 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2598
2599 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2600 free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
2601 } else {
2602 del_timer_sync(&ugeth->phy_info_timer);
2603 }
2604
2605 ucc_geth_memclean(ugeth);
2606}
2607
2608static int ucc_geth_startup(ucc_geth_private_t *ugeth)
2609{
2610 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2611 ucc_geth_init_pram_t *p_init_enet_pram;
2612 ucc_fast_private_t *uccf;
2613 ucc_geth_info_t *ug_info;
2614 ucc_fast_info_t *uf_info;
2615 ucc_fast_t *uf_regs;
2616 ucc_geth_t *ug_regs;
2617 int ret_val = -EINVAL;
2618 u32 remoder = UCC_GETH_REMODER_INIT;
2619 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2620 u32 ifstat, i, j, size, l2qt, l3qt, length;
2621 u16 temoder = UCC_GETH_TEMODER_INIT;
2622 u16 test;
2623 u8 function_code = 0;
2624 u8 *bd, *endOfRing;
2625 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2626
2627 ugeth_vdbg("%s: IN", __FUNCTION__);
2628
2629 ug_info = ugeth->ug_info;
2630 uf_info = &ug_info->uf_info;
2631
2632 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2633 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2634 ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
2635 return -EINVAL;
2636 }
2637
2638 /* Rx BD lengths */
2639 for (i = 0; i < ug_info->numQueuesRx; i++) {
2640 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2641 (ug_info->bdRingLenRx[i] %
2642 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2643 ugeth_err
2644 ("%s: Rx BD ring length must be multiple of 4,"
2645 " no smaller than 8.", __FUNCTION__);
2646 return -EINVAL;
2647 }
2648 }
2649
2650 /* Tx BD lengths */
2651 for (i = 0; i < ug_info->numQueuesTx; i++) {
2652 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2653 ugeth_err
2654 ("%s: Tx BD ring length must be no smaller than 2.",
2655 __FUNCTION__);
2656 return -EINVAL;
2657 }
2658 }
2659
2660 /* mrblr */
2661 if ((uf_info->max_rx_buf_length == 0) ||
2662 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2663 ugeth_err
2664 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2665 __FUNCTION__);
2666 return -EINVAL;
2667 }
2668
2669 /* num Tx queues */
2670 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2671 ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2672 return -EINVAL;
2673 }
2674
2675 /* num Rx queues */
2676 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2677 ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2678 return -EINVAL;
2679 }
2680
2681 /* l2qt */
2682 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2683 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2684 ugeth_err
2685 ("%s: VLAN priority table entry must not be"
2686 " larger than number of Rx queues.",
2687 __FUNCTION__);
2688 return -EINVAL;
2689 }
2690 }
2691
2692 /* l3qt */
2693 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2694 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2695 ugeth_err
2696 ("%s: IP priority table entry must not be"
2697 " larger than number of Rx queues.",
2698 __FUNCTION__);
2699 return -EINVAL;
2700 }
2701 }
2702
2703 if (ug_info->cam && !ug_info->ecamptr) {
2704 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2705 __FUNCTION__);
2706 return -EINVAL;
2707 }
2708
2709 if ((ug_info->numStationAddresses !=
2710 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2711 && ug_info->rxExtendedFiltering) {
2712 ugeth_err("%s: Number of station addresses greater than 1 "
2713 "not allowed in extended parsing mode.",
2714 __FUNCTION__);
2715 return -EINVAL;
2716 }
2717
2718 /* Generate uccm_mask for receive */
2719 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2720 for (i = 0; i < ug_info->numQueuesRx; i++)
2721 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2722
2723 for (i = 0; i < ug_info->numQueuesTx; i++)
2724 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2725 /* Initialize the general fast UCC block. */
2726 if (ucc_fast_init(uf_info, &uccf)) {
2727 ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2728 ucc_geth_memclean(ugeth);
2729 return -ENOMEM;
2730 }
2731 ugeth->uccf = uccf;
2732
2733 switch (ug_info->numThreadsRx) {
2734 case UCC_GETH_NUM_OF_THREADS_1:
2735 numThreadsRxNumerical = 1;
2736 break;
2737 case UCC_GETH_NUM_OF_THREADS_2:
2738 numThreadsRxNumerical = 2;
2739 break;
2740 case UCC_GETH_NUM_OF_THREADS_4:
2741 numThreadsRxNumerical = 4;
2742 break;
2743 case UCC_GETH_NUM_OF_THREADS_6:
2744 numThreadsRxNumerical = 6;
2745 break;
2746 case UCC_GETH_NUM_OF_THREADS_8:
2747 numThreadsRxNumerical = 8;
2748 break;
2749 default:
2750 ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
2751 ucc_geth_memclean(ugeth);
2752 return -EINVAL;
2753 break;
2754 }
2755
2756 switch (ug_info->numThreadsTx) {
2757 case UCC_GETH_NUM_OF_THREADS_1:
2758 numThreadsTxNumerical = 1;
2759 break;
2760 case UCC_GETH_NUM_OF_THREADS_2:
2761 numThreadsTxNumerical = 2;
2762 break;
2763 case UCC_GETH_NUM_OF_THREADS_4:
2764 numThreadsTxNumerical = 4;
2765 break;
2766 case UCC_GETH_NUM_OF_THREADS_6:
2767 numThreadsTxNumerical = 6;
2768 break;
2769 case UCC_GETH_NUM_OF_THREADS_8:
2770 numThreadsTxNumerical = 8;
2771 break;
2772 default:
2773 ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
2774 ucc_geth_memclean(ugeth);
2775 return -EINVAL;
2776 break;
2777 }
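 /* The two switches above (and their twins in the DEBUG dump code)
  * repeat the same UCC_GETH_NUM_OF_THREADS_x -> integer mapping; a
  * hypothetical helper, e.g.
  *
  *	static int ucc_geth_thread_count(int num_threads_enum);
  *
  * returning 1/2/4/6/8 or -EINVAL, could fold the four copies into
  * one.  Sketch only; no such helper exists in this driver.
  */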
2778
2779 /* Calculate rx_extended_features */
2780 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2781 ug_info->ipAddressAlignment ||
2782 (ug_info->numStationAddresses !=
2783 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2784
2785 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2786 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2787 || (ug_info->vlanOperationNonTagged !=
2788 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2789
2790 uf_regs = uccf->uf_regs;
2791 ug_regs = (ucc_geth_t *) (uccf->uf_regs);
2792 ugeth->ug_regs = ug_regs;
2793
2794 init_default_reg_vals(&uf_regs->upsmr,
2795 &ug_regs->maccfg1, &ug_regs->maccfg2);
2796
2797 /* Set UPSMR */
2798 /* For more details see the hardware spec. */
2799 init_rx_parameters(ug_info->bro,
2800 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2801
2802 /* We're going to ignore other registers for now, */
2803 /* except as needed to get up and running */
2804
2805 /* Set MACCFG1 */
2806 /* For more details see the hardware spec. */
2807 init_flow_control_params(ug_info->aufc,
2808 ug_info->receiveFlowControl,
2809 1,
2810 ug_info->pausePeriod,
2811 ug_info->extensionField,
2812 &uf_regs->upsmr,
2813 &ug_regs->uempr, &ug_regs->maccfg1);
2814
2815 maccfg1 = in_be32(&ug_regs->maccfg1);
2816 maccfg1 |= MACCFG1_ENABLE_RX;
2817 maccfg1 |= MACCFG1_ENABLE_TX;
2818 out_be32(&ug_regs->maccfg1, maccfg1);
2819
2820 /* Set IPGIFG */
2821 /* For more details see the hardware spec. */
2822 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2823 ug_info->nonBackToBackIfgPart2,
2824 ug_info->
2825 miminumInterFrameGapEnforcement,
2826 ug_info->backToBackInterFrameGap,
2827 &ug_regs->ipgifg);
2828 if (ret_val != 0) {
2829 ugeth_err("%s: IPGIFG initialization parameter too large.",
2830 __FUNCTION__);
2831 ucc_geth_memclean(ugeth);
2832 return ret_val;
2833 }
2834
2835 /* Set HAFDUP */
2836 /* For more details see the hardware spec. */
2837 ret_val = init_half_duplex_params(ug_info->altBeb,
2838 ug_info->backPressureNoBackoff,
2839 ug_info->noBackoff,
2840 ug_info->excessDefer,
2841 ug_info->altBebTruncation,
2842 ug_info->maxRetransmission,
2843 ug_info->collisionWindow,
2844 &ug_regs->hafdup);
2845 if (ret_val != 0) {
2846 ugeth_err("%s: Half Duplex initialization parameter too large.",
2847 __FUNCTION__);
2848 ucc_geth_memclean(ugeth);
2849 return ret_val;
2850 }
2851
2852 /* Set IFSTAT */
2853 /* For more details see the hardware spec. */
2854 /* Read only - resets upon read */
2855 ifstat = in_be32(&ug_regs->ifstat);
2856
2857 /* Clear UEMPR */
2858 /* For more details see the hardware spec. */
2859 out_be32(&ug_regs->uempr, 0);
2860
2861 /* Set UESCR */
2862 /* For more details see the hardware spec. */
2863 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2864 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2865 0, &uf_regs->upsmr, &ug_regs->uescr);
2866
2867 /* Allocate Tx bds */
2868 for (j = 0; j < ug_info->numQueuesTx; j++) {
2869 /* Allocate in multiple of
2870 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2871 according to spec */
2872 length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
2873 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2874 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2875 if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
2876 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2877 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2878 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2879 u32 align = 4;
2880 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2881 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2882 ugeth->tx_bd_ring_offset[j] =
2883 (u32) (kmalloc((u32) (length + align),
2884 GFP_KERNEL));
2885 if (ugeth->tx_bd_ring_offset[j] != 0)
2886 ugeth->p_tx_bd_ring[j] =
2887 (void*)((ugeth->tx_bd_ring_offset[j] +
2888 align) & ~(align - 1));
2889 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2890 ugeth->tx_bd_ring_offset[j] =
2891 qe_muram_alloc(length,
2892 UCC_GETH_TX_BD_RING_ALIGNMENT);
2893 if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
2894 ugeth->p_tx_bd_ring[j] =
2895 (u8 *) qe_muram_addr(ugeth->
2896 tx_bd_ring_offset[j]);
2897 }
2898 if (!ugeth->p_tx_bd_ring[j]) {
2899 ugeth_err
2900 ("%s: Can not allocate memory for Tx bd rings.",
2901 __FUNCTION__);
2902 ucc_geth_memclean(ugeth);
2903 return -ENOMEM;
2904 }
2905 /* Zero unused end of bd ring, according to spec */
2906 memset(ugeth->p_tx_bd_ring[j] +
2907 ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
2908 length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
2909 }
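	/* The ring length above is rounded up to a whole number of
	 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT chunks; for example,
	 * assuming an 8-byte BD and a 64-byte chunk, a 30-entry ring
	 * (240 bytes) is padded to 256, and the memset zeroes the 16 unused
	 * tail bytes.  For MEM_PART_SYSTEM the block is over-allocated by
	 * 'align' bytes so the ring base can be aligned by hand, while the
	 * raw kmalloc pointer stays in tx_bd_ring_offset[] for the later
	 * free.  (The 8/64 figures are illustrative; the real constants
	 * live in ucc_geth.h.) */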
2910
2911 /* Allocate Rx bds */
2912 for (j = 0; j < ug_info->numQueuesRx; j++) {
2913 length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
2914 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2915 u32 align = 4;
2916 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2917 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2918 ugeth->rx_bd_ring_offset[j] =
2919 (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
2920 if (ugeth->rx_bd_ring_offset[j] != 0)
2921 ugeth->p_rx_bd_ring[j] =
2922 (void*)((ugeth->rx_bd_ring_offset[j] +
2923 align) & ~(align - 1));
2924 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2925 ugeth->rx_bd_ring_offset[j] =
2926 qe_muram_alloc(length,
2927 UCC_GETH_RX_BD_RING_ALIGNMENT);
2928 if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
2929 ugeth->p_rx_bd_ring[j] =
2930 (u8 *) qe_muram_addr(ugeth->
2931 rx_bd_ring_offset[j]);
2932 }
2933 if (!ugeth->p_rx_bd_ring[j]) {
2934 ugeth_err
2935 ("%s: Can not allocate memory for Rx bd rings.",
2936 __FUNCTION__);
2937 ucc_geth_memclean(ugeth);
2938 return -ENOMEM;
2939 }
2940 }
2941
2942 /* Init Tx bds */
2943 for (j = 0; j < ug_info->numQueuesTx; j++) {
2944 /* Setup the skbuff rings */
2945 ugeth->tx_skbuff[j] =
2946 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2947 ugeth->ug_info->bdRingLenTx[j],
2948 GFP_KERNEL);
2949
2950 if (ugeth->tx_skbuff[j] == NULL) {
2951 ugeth_err("%s: Could not allocate tx_skbuff",
2952 __FUNCTION__);
2953 ucc_geth_memclean(ugeth);
2954 return -ENOMEM;
2955 }
2956
2957 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2958 ugeth->tx_skbuff[j][i] = NULL;
2959
2960 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2961 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2962 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2963 BD_BUFFER_CLEAR(bd);
2964 BD_STATUS_AND_LENGTH_SET(bd, 0);
2965 bd += UCC_GETH_SIZE_OF_BD;
2966 }
2967 bd -= UCC_GETH_SIZE_OF_BD;
2968 BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
2969 }
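	/* A BD ring is a flat array of descriptors; the wrap bit (T_W here,
	 * R_W for Rx below) in the last BD's status word sends the QE back
	 * to the ring base, so both hardware and driver can walk the ring
	 * with a plain pointer increment plus a wrap check. */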
2970
2971 /* Init Rx bds */
2972 for (j = 0; j < ug_info->numQueuesRx; j++) {
2973 /* Setup the skbuff rings */
2974 ugeth->rx_skbuff[j] =
2975 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2976 ugeth->ug_info->bdRingLenRx[j],
2977 GFP_KERNEL);
2978
2979 if (ugeth->rx_skbuff[j] == NULL) {
2980 ugeth_err("%s: Could not allocate rx_skbuff",
2981 __FUNCTION__);
2982 ucc_geth_memclean(ugeth);
2983 return -ENOMEM;
2984 }
2985
2986 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2987 ugeth->rx_skbuff[j][i] = NULL;
2988
2989 ugeth->skb_currx[j] = 0;
2990 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2991 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2992 BD_STATUS_AND_LENGTH_SET(bd, R_I);
2993 BD_BUFFER_CLEAR(bd);
2994 bd += UCC_GETH_SIZE_OF_BD;
2995 }
2996 bd -= UCC_GETH_SIZE_OF_BD;
2997 BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
2998 }
2999
3000 /*
3001 * Global PRAM
3002 */
3003 /* Tx global PRAM */
3004 /* Allocate global tx parameter RAM page */
3005 ugeth->tx_glbl_pram_offset =
3006 qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
3007 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
3008 if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
3009 ugeth_err
3010 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
3011 __FUNCTION__);
3012 ucc_geth_memclean(ugeth);
3013 return -ENOMEM;
3014 }
3015 ugeth->p_tx_glbl_pram =
3016 (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
3017 tx_glbl_pram_offset);
3018 /* Zero out p_tx_glbl_pram */
3019 memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));
3020
3021 /* Fill global PRAM */
3022
3023 /* TQPTR */
3024 /* Size varies with number of Tx threads */
3025 ugeth->thread_dat_tx_offset =
3026 qe_muram_alloc(numThreadsTxNumerical *
3027 sizeof(ucc_geth_thread_data_tx_t) +
3028 32 * (numThreadsTxNumerical == 1),
3029 UCC_GETH_THREAD_DATA_ALIGNMENT);
3030 if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
3031 ugeth_err
3032 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
3033 __FUNCTION__);
3034 ucc_geth_memclean(ugeth);
3035 return -ENOMEM;
3036 }
3037
3038 ugeth->p_thread_data_tx =
3039 (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
3040 thread_dat_tx_offset);
3041 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
3042
3043 /* vtagtable */
3044 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
3045 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
3046 ug_info->vtagtable[i]);
3047
3048 /* iphoffset */
3049 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
3050 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
3051
3052 /* SQPTR */
3053 /* Size varies with number of Tx queues */
3054 ugeth->send_q_mem_reg_offset =
3055 qe_muram_alloc(ug_info->numQueuesTx *
3056 sizeof(ucc_geth_send_queue_qd_t),
3057 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
3058 if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
3059 ugeth_err
3060 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
3061 __FUNCTION__);
3062 ucc_geth_memclean(ugeth);
3063 return -ENOMEM;
3064 }
3065
3066 ugeth->p_send_q_mem_reg =
3067 (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
3068 send_q_mem_reg_offset);
3069 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
3070
3071 /* Setup the table */
3072 /* Assume BD rings are already established */
3073 for (i = 0; i < ug_info->numQueuesTx; i++) {
3074 endOfRing =
3075 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
3076 1) * UCC_GETH_SIZE_OF_BD;
3077 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3078 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3079 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
3080 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3081 last_bd_completed_address,
3082 (u32) virt_to_phys(endOfRing));
3083 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3084 MEM_PART_MURAM) {
3085 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3086 (u32) immrbar_virt_to_phys(ugeth->
3087 p_tx_bd_ring[i]));
3088 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3089 last_bd_completed_address,
3090 (u32) immrbar_virt_to_phys(endOfRing));
3091 }
3092 }
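	/* The QE works with physical addresses only: rings in system memory
	 * are translated with virt_to_phys(), rings carved out of MURAM are
	 * translated relative to the IMMR base with immrbar_virt_to_phys().
	 * The same split is applied to the Rx BD queue table below. */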
3093
3094 /* schedulerbasepointer */
3095
3096 if (ug_info->numQueuesTx > 1) {
3097 /* scheduler exists only if more than 1 tx queue */
3098 ugeth->scheduler_offset =
3099 qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
3100 UCC_GETH_SCHEDULER_ALIGNMENT);
3101 if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
3102 ugeth_err
3103 ("%s: Can not allocate DPRAM memory for p_scheduler.",
3104 __FUNCTION__);
3105 ucc_geth_memclean(ugeth);
3106 return -ENOMEM;
3107 }
3108
3109 ugeth->p_scheduler =
3110 (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
3111 scheduler_offset);
3112 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
3113 ugeth->scheduler_offset);
3114 /* Zero out p_scheduler */
3115 memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));
3116
3117 /* Set values in scheduler */
3118 out_be32(&ugeth->p_scheduler->mblinterval,
3119 ug_info->mblinterval);
3120 out_be16(&ugeth->p_scheduler->nortsrbytetime,
3121 ug_info->nortsrbytetime);
3122 ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
3123 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
3124 ugeth->p_scheduler->txasap = ug_info->txasap;
3125 ugeth->p_scheduler->extrabw = ug_info->extrabw;
3126 for (i = 0; i < NUM_TX_QUEUES; i++)
3127 ugeth->p_scheduler->weightfactor[i] =
3128 ug_info->weightfactor[i];
3129
3130 /* Set pointers to cpucount registers in scheduler */
3131 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
3132 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
3133 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
3134 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
3135 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
3136 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
3137 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
3138 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
3139 }
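	/* The cpucount words are how software kicks the scheduler: the
	 * transmit path keeps a per-queue running count of queued BDs and
	 * writes it to the matching cpucount register (see
	 * ucc_geth_start_xmit), so their addresses are cached in
	 * p_cpucount[] once here. */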
3140
3141 /* schedulerbasepointer */
3142 /* TxRMON_PTR (statistics) */
3143 if (ug_info->
3144 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
3145 ugeth->tx_fw_statistics_pram_offset =
3146 qe_muram_alloc(sizeof
3147 (ucc_geth_tx_firmware_statistics_pram_t),
3148 UCC_GETH_TX_STATISTICS_ALIGNMENT);
3149 if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
3150 ugeth_err
3151 ("%s: Can not allocate DPRAM memory for"
3152 " p_tx_fw_statistics_pram.", __FUNCTION__);
3153 ucc_geth_memclean(ugeth);
3154 return -ENOMEM;
3155 }
3156 ugeth->p_tx_fw_statistics_pram =
3157 (ucc_geth_tx_firmware_statistics_pram_t *)
3158 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
3159 /* Zero out p_tx_fw_statistics_pram */
3160 memset(ugeth->p_tx_fw_statistics_pram,
3161 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
3162 }
3163
3164 /* temoder */
3165 /* Already has speed set */
3166
3167 if (ug_info->numQueuesTx > 1)
3168 temoder |= TEMODER_SCHEDULER_ENABLE;
3169 if (ug_info->ipCheckSumGenerate)
3170 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
3171 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
3172 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
3173
3174 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
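	/* The read-back of temoder is presumably only there to make sure the
	 * store to parameter RAM has completed before the QE is started; the
	 * value itself is never used. */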
3175
3176 /* Function code register value to be used later */
3177 function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
3178 /* Required for QE */
3179
3180 /* function code register */
3181 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
3182
3183 /* Rx global PRAM */
3184 /* Allocate global rx parameter RAM page */
3185 ugeth->rx_glbl_pram_offset =
3186 qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
3187 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
3188 if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
3189 ugeth_err
3190 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
3191 __FUNCTION__);
3192 ucc_geth_memclean(ugeth);
3193 return -ENOMEM;
3194 }
3195 ugeth->p_rx_glbl_pram =
3196 (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
3197 rx_glbl_pram_offset);
3198 /* Zero out p_rx_glbl_pram */
3199 memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));
3200
3201 /* Fill global PRAM */
3202
3203 /* RQPTR */
3204 /* Size varies with number of Rx threads */
3205 ugeth->thread_dat_rx_offset =
3206 qe_muram_alloc(numThreadsRxNumerical *
3207 sizeof(ucc_geth_thread_data_rx_t),
3208 UCC_GETH_THREAD_DATA_ALIGNMENT);
3209 if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
3210 ugeth_err
3211 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
3212 __FUNCTION__);
3213 ucc_geth_memclean(ugeth);
3214 return -ENOMEM;
3215 }
3216
3217 ugeth->p_thread_data_rx =
3218 (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
3219 thread_dat_rx_offset);
3220 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
3221
3222 /* typeorlen */
3223 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
3224
3225 /* rxrmonbaseptr (statistics) */
3226 if (ug_info->
3227 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
3228 ugeth->rx_fw_statistics_pram_offset =
3229 qe_muram_alloc(sizeof
3230 (ucc_geth_rx_firmware_statistics_pram_t),
3231 UCC_GETH_RX_STATISTICS_ALIGNMENT);
3232 if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
3233 ugeth_err
3234 ("%s: Can not allocate DPRAM memory for"
3235 " p_rx_fw_statistics_pram.", __FUNCTION__);
3236 ucc_geth_memclean(ugeth);
3237 return -ENOMEM;
3238 }
3239 ugeth->p_rx_fw_statistics_pram =
3240 (ucc_geth_rx_firmware_statistics_pram_t *)
3241 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
3242 /* Zero out p_rx_fw_statistics_pram */
3243 memset(ugeth->p_rx_fw_statistics_pram, 0,
3244 sizeof(ucc_geth_rx_firmware_statistics_pram_t));
3245 }
3246
3247 /* intCoalescingPtr */
3248
3249 /* Size varies with number of Rx queues */
3250 ugeth->rx_irq_coalescing_tbl_offset =
3251 qe_muram_alloc(ug_info->numQueuesRx *
3252 sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
3253 UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
3254 if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
3255 ugeth_err
3256 ("%s: Can not allocate DPRAM memory for"
3257 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
3258 ucc_geth_memclean(ugeth);
3259 return -ENOMEM;
3260 }
3261
3262 ugeth->p_rx_irq_coalescing_tbl =
3263 (ucc_geth_rx_interrupt_coalescing_table_t *)
3264 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3265 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3266 ugeth->rx_irq_coalescing_tbl_offset);
3267
3268 /* Fill interrupt coalescing table */
3269 for (i = 0; i < ug_info->numQueuesRx; i++) {
3270 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3271 interruptcoalescingmaxvalue,
3272 ug_info->interruptcoalescingmaxvalue[i]);
3273 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3274 interruptcoalescingcounter,
3275 ug_info->interruptcoalescingmaxvalue[i]);
3276 }
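	/* interruptcoalescingmaxvalue is the number of received frames the
	 * controller may batch before raising an interrupt on that queue;
	 * the live counter is preloaded with the same value, which on the
	 * usual count-down scheme arms the first interrupt after one full
	 * batch. */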
3277
3278 /* MRBLR */
3279 init_max_rx_buff_len(uf_info->max_rx_buf_length,
3280 &ugeth->p_rx_glbl_pram->mrblr);
3281 /* MFLR */
3282 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
3283 /* MINFLR */
3284 init_min_frame_len(ug_info->minFrameLength,
3285 &ugeth->p_rx_glbl_pram->minflr,
3286 &ugeth->p_rx_glbl_pram->mrblr);
3287 /* MAXD1 */
3288 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
3289 /* MAXD2 */
3290 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
3291
3292 /* l2qt */
3293 l2qt = 0;
3294 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3295 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3296 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
3297
3298 /* l3qt */
3299 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3300 l3qt = 0;
3301 for (i = 0; i < 8; i++)
3302 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3303 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt);
3304 }
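	/* l2qt and l3qt are packed nibble maps from priority to Rx queue:
	 * entry i occupies bits (31 - 4 * i) down to (28 - 4 * i) of its
	 * 32-bit word, so VLAN priority 0 lands in bits 31:28 of l2qt, and
	 * the IP priority table fills successive l3qt words at eight 4-bit
	 * entries apiece. */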
3305
3306 /* vlantype */
3307 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3308
3309 /* vlantci */
3310 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3311
3312 /* ecamptr */
3313 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3314
3315 /* RBDQPTR */
3316 /* Size varies with number of Rx queues */
3317 ugeth->rx_bd_qs_tbl_offset =
3318 qe_muram_alloc(ug_info->numQueuesRx *
3319 (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3320 sizeof(ucc_geth_rx_prefetched_bds_t)),
3321 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3322 if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
3323 ugeth_err
3324 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3325 __FUNCTION__);
3326 ucc_geth_memclean(ugeth);
3327 return -ENOMEM;
3328 }
3329
3330 ugeth->p_rx_bd_qs_tbl =
3331 (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
3332 rx_bd_qs_tbl_offset);
3333 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3334 /* Zero out p_rx_bd_qs_tbl */
3335 memset(ugeth->p_rx_bd_qs_tbl,
3336 0,
3337 ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3338 sizeof(ucc_geth_rx_prefetched_bds_t)));
3339
3340 /* Setup the table */
3341 /* Assume BD rings are already established */
3342 for (i = 0; i < ug_info->numQueuesRx; i++) {
3343 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3344 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3345 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3346 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3347 MEM_PART_MURAM) {
3348 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3349 (u32) immrbar_virt_to_phys(ugeth->
3350 p_rx_bd_ring[i]));
3351 }
3352 /* rest of fields handled by QE */
3353 }
3354
3355 /* remoder */
3356 /* Already has speed set */
3357
3358 if (ugeth->rx_extended_features)
3359 remoder |= REMODER_RX_EXTENDED_FEATURES;
3360 if (ug_info->rxExtendedFiltering)
3361 remoder |= REMODER_RX_EXTENDED_FILTERING;
3362 if (ug_info->dynamicMaxFrameLength)
3363 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3364 if (ug_info->dynamicMinFrameLength)
3365 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3366 remoder |=
3367 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3368 remoder |=
3369 ug_info->
3370 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3371 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3372 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3373 if (ug_info->ipCheckSumCheck)
3374 remoder |= REMODER_IP_CHECKSUM_CHECK;
3375 if (ug_info->ipAddressAlignment)
3376 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3377 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3378
3379 	/* Note that this function must be called */
3380 	/* ONLY AFTER p_tx_fw_statistics_pram */
3381 	/* and p_rx_fw_statistics_pram are allocated ! */
3382 init_firmware_statistics_gathering_mode((ug_info->
3383 statisticsMode &
3384 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3385 (ug_info->statisticsMode &
3386 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3387 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
3388 ugeth->tx_fw_statistics_pram_offset,
3389 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3390 ugeth->rx_fw_statistics_pram_offset,
3391 &ugeth->p_tx_glbl_pram->temoder,
3392 &ugeth->p_rx_glbl_pram->remoder);
3393
3394 /* function code register */
3395 ugeth->p_rx_glbl_pram->rstate = function_code;
3396
3397 /* initialize extended filtering */
3398 if (ug_info->rxExtendedFiltering) {
3399 if (!ug_info->extendedFilteringChainPointer) {
3400 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3401 __FUNCTION__);
3402 ucc_geth_memclean(ugeth);
3403 return -EINVAL;
3404 }
3405
3406 /* Allocate memory for extended filtering Mode Global
3407 Parameters */
3408 ugeth->exf_glbl_param_offset =
3409 qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
3410 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3411 if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
3412 ugeth_err
3413 ("%s: Can not allocate DPRAM memory for"
3414 " p_exf_glbl_param.", __FUNCTION__);
3415 ucc_geth_memclean(ugeth);
3416 return -ENOMEM;
3417 }
3418
3419 ugeth->p_exf_glbl_param =
3420 (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
3421 exf_glbl_param_offset);
3422 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3423 ugeth->exf_glbl_param_offset);
3424 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3425 (u32) ug_info->extendedFilteringChainPointer);
3426
3427 } else { /* initialize 82xx style address filtering */
3428
3429 /* Init individual address recognition registers to disabled */
3430
3431 for (j = 0; j < NUM_OF_PADDRS; j++)
3432 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3433
3434 /* Create CQs for hash tables */
3435 if (ug_info->maxGroupAddrInHash > 0) {
3436 INIT_LIST_HEAD(&ugeth->group_hash_q);
3437 }
3438 if (ug_info->maxIndAddrInHash > 0) {
3439 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3440 }
3441 p_82xx_addr_filt =
3442 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
3443 p_rx_glbl_pram->addressfiltering;
3444
3445 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3446 ENET_ADDR_TYPE_GROUP);
3447 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3448 ENET_ADDR_TYPE_INDIVIDUAL);
3449 }
3450
3451 /*
3452 * Initialize UCC at QE level
3453 */
3454
3455 command = QE_INIT_TX_RX;
3456
3457 /* Allocate shadow InitEnet command parameter structure.
3458 * This is needed because after the InitEnet command is executed,
3459 * the structure in DPRAM is released, because DPRAM is a premium
3460 * resource.
3461 * This shadow structure keeps a copy of what was done so that the
3462 * allocated resources can be released when the channel is freed.
3463 */
3464 if (!(ugeth->p_init_enet_param_shadow =
3465 (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
3466 GFP_KERNEL))) {
3467 ugeth_err
3468 ("%s: Can not allocate memory for"
3469 " p_UccInitEnetParamShadows.", __FUNCTION__);
3470 ucc_geth_memclean(ugeth);
3471 return -ENOMEM;
3472 }
3473 /* Zero out *p_init_enet_param_shadow */
3474 memset((char *)ugeth->p_init_enet_param_shadow,
3475 0, sizeof(ucc_geth_init_pram_t));
3476
3477 /* Fill shadow InitEnet command parameter structure */
3478
3479 ugeth->p_init_enet_param_shadow->resinit1 =
3480 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3481 ugeth->p_init_enet_param_shadow->resinit2 =
3482 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3483 ugeth->p_init_enet_param_shadow->resinit3 =
3484 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3485 ugeth->p_init_enet_param_shadow->resinit4 =
3486 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3487 ugeth->p_init_enet_param_shadow->resinit5 =
3488 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3489 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3490 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3491 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3492 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3493
3494 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3495 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3496 if ((ug_info->largestexternallookupkeysize !=
3497 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3498 && (ug_info->largestexternallookupkeysize !=
3499 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3500 && (ug_info->largestexternallookupkeysize !=
3501 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3502 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3503 __FUNCTION__);
3504 ucc_geth_memclean(ugeth);
3505 return -EINVAL;
3506 }
3507 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3508 ug_info->largestexternallookupkeysize;
3509 size = sizeof(ucc_geth_thread_rx_pram_t);
3510 if (ug_info->rxExtendedFiltering) {
3511 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3512 if (ug_info->largestexternallookupkeysize ==
3513 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3514 size +=
3515 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3516 if (ug_info->largestexternallookupkeysize ==
3517 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3518 size +=
3519 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3520 }
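	/* The per-thread Rx parameter RAM grows with the filtering features
	 * in use: a base thread area, an extra region when extended
	 * filtering is enabled, and a further region sized for an 8- or
	 * 16-byte external lookup key, so 'size' must be settled before the
	 * Rx thread entries are laid out below. */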
3521
3522 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3523 p_init_enet_param_shadow->rxthread[0]),
3524 (u8) (numThreadsRxNumerical + 1)
3525 /* Rx needs one extra for terminator */
3526 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3527 ug_info->riscRx, 1)) != 0) {
3528 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3529 __FUNCTION__);
3530 ucc_geth_memclean(ugeth);
3531 return ret_val;
3532 }
3533
3534 ugeth->p_init_enet_param_shadow->txglobal =
3535 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3536 if ((ret_val =
3537 fill_init_enet_entries(ugeth,
3538 &(ugeth->p_init_enet_param_shadow->
3539 txthread[0]), numThreadsTxNumerical,
3540 sizeof(ucc_geth_thread_tx_pram_t),
3541 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3542 ug_info->riscTx, 0)) != 0) {
3543 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3544 __FUNCTION__);
3545 ucc_geth_memclean(ugeth);
3546 return ret_val;
3547 }
3548
3549 /* Load Rx bds with buffers */
3550 for (i = 0; i < ug_info->numQueuesRx; i++) {
3551 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3552 ugeth_err("%s: Can not fill Rx bds with buffers.",
3553 __FUNCTION__);
3554 ucc_geth_memclean(ugeth);
3555 return ret_val;
3556 }
3557 }
3558
3559 /* Allocate InitEnet command parameter structure */
3560 init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
3561 if (IS_MURAM_ERR(init_enet_pram_offset)) {
3562 ugeth_err
3563 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3564 __FUNCTION__);
3565 ucc_geth_memclean(ugeth);
3566 return -ENOMEM;
3567 }
3568 p_init_enet_pram =
3569 (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);
3570
3571 /* Copy shadow InitEnet command parameter structure into PRAM */
3572 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
3573 p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
3574 p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
3575 p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
3576 out_be16(&p_init_enet_pram->resinit5,
3577 ugeth->p_init_enet_param_shadow->resinit5);
3578 p_init_enet_pram->largestexternallookupkeysize =
3579 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
3580 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3581 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3582 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3583 out_be32(&p_init_enet_pram->rxthread[i],
3584 ugeth->p_init_enet_param_shadow->rxthread[i]);
3585 out_be32(&p_init_enet_pram->txglobal,
3586 ugeth->p_init_enet_param_shadow->txglobal);
3587 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3588 out_be32(&p_init_enet_pram->txthread[i],
3589 ugeth->p_init_enet_param_shadow->txthread[i]);
3590
3591 /* Issue QE command */
3592 cecr_subblock =
3593 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3594 qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
3595 init_enet_pram_offset);
3596
3597 /* Free InitEnet command parameter */
3598 qe_muram_free(init_enet_pram_offset);
3599
3600 return 0;
3601}
3602
3603/* returns a net_device_stats structure pointer */
3604static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
3605{
3606 ucc_geth_private_t *ugeth = netdev_priv(dev);
3607
3608 return &(ugeth->stats);
3609}
3610
3611/* ucc_geth_timeout gets called when a packet has not been
3612 * transmitted after a set amount of time.
3613 * For now, assume that clearing out all the structures, and
3614 * starting over will fix the problem. */
3615static void ucc_geth_timeout(struct net_device *dev)
3616{
3617 ucc_geth_private_t *ugeth = netdev_priv(dev);
3618
3619 ugeth_vdbg("%s: IN", __FUNCTION__);
3620
3621 ugeth->stats.tx_errors++;
3622
3623 ugeth_dump_regs(ugeth);
3624
3625 if (dev->flags & IFF_UP) {
3626 ucc_geth_stop(ugeth);
3627 ucc_geth_startup(ugeth);
3628 }
3629
3630 netif_schedule(dev);
3631}
3632
3633/* This is called by the kernel when a frame is ready for transmission. */
3634/* It is pointed to by the dev->hard_start_xmit function pointer */
3635static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3636{
3637 ucc_geth_private_t *ugeth = netdev_priv(dev);
3638 u8 *bd; /* BD pointer */
3639 u32 bd_status;
3640 u8 txQ = 0;
3641
3642 ugeth_vdbg("%s: IN", __FUNCTION__);
3643
3644 spin_lock_irq(&ugeth->lock);
3645
3646 ugeth->stats.tx_bytes += skb->len;
3647
3648 /* Start from the next BD that should be filled */
3649 bd = ugeth->txBd[txQ];
3650 bd_status = BD_STATUS_AND_LENGTH(bd);
3651 /* Save the skb pointer so we can free it later */
3652 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3653
3654 /* Update the current skb pointer (wrapping if this was the last) */
3655 ugeth->skb_curtx[txQ] =
3656 (ugeth->skb_curtx[txQ] +
3657 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
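	/* The ring indices advance with a mask rather than a modulo, which
	 * assumes TX_RING_MOD_MASK(len) expands to (len - 1) and therefore
	 * that the configured ring lengths are powers of two. */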
3658
3659 /* set up the buffer descriptor */
3660 BD_BUFFER_SET(bd,
3661 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3662
3664
3665 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3666
3667 BD_STATUS_AND_LENGTH_SET(bd, bd_status);
3668
3669 dev->trans_start = jiffies;
3670
3671 /* Move to next BD in the ring */
3672 if (!(bd_status & T_W))
3673 ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD;
3674 else
3675 ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
3676
3677 /* If the next BD still needs to be cleaned up, then the bds
3678 are full. We need to tell the kernel to stop sending us stuff. */
3679 if (bd == ugeth->confBd[txQ]) {
3680 if (!netif_queue_stopped(dev))
3681 netif_stop_queue(dev);
3682 }
3683
3684 if (ugeth->p_scheduler) {
3685 ugeth->cpucount[txQ]++;
3686 /* Indicate to QE that there are more Tx bds ready for
3687 transmission */
3688 /* This is done by writing a running counter of the bd
3689 count to the scheduler PRAM. */
3690 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3691 }
3692
3693 spin_unlock_irq(&ugeth->lock);
3694
3695 return 0;
3696}
3697
3698static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
3699{
3700 struct sk_buff *skb;
3701 u8 *bd;
3702 u16 length, howmany = 0;
3703 u32 bd_status;
3704 u8 *bdBuffer;
3705
3706 ugeth_vdbg("%s: IN", __FUNCTION__);
3707
3708 spin_lock(&ugeth->lock);
3709 /* collect received buffers */
3710 bd = ugeth->rxBd[rxQ];
3711
3712 bd_status = BD_STATUS_AND_LENGTH(bd);
3713
3714 	/* while there are received buffers, i.e. BDs with R_E (empty) cleared */
3715 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3716 bdBuffer = (u8 *) BD_BUFFER(bd);
3717 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
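		/* The 4 bytes dropped from the reported length are the
		 * frame's trailing CRC (FCS), which must not be handed up
		 * the stack. */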
3718 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3719
3720 		/* drop the buffer if no skb is attached to it, if the frame
3721 		is neither first (R_F) nor last (R_L), or on a fatal Rx error */
3722 if (!skb ||
3723 (!(bd_status & (R_F | R_L))) ||
3724 (bd_status & R_ERRORS_FATAL)) {
3725 ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
3726 __FUNCTION__, __LINE__, (u32) skb);
3727 if (skb)
3728 dev_kfree_skb_any(skb);
3729
3730 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3731 ugeth->stats.rx_dropped++;
3732 } else {
3733 ugeth->stats.rx_packets++;
3734 howmany++;
3735
3736 /* Prep the skb for the packet */
3737 skb_put(skb, length);
3738
3739 /* Tell the skb what kind of packet this is */
3740 skb->protocol = eth_type_trans(skb, ugeth->dev);
3741
3742 ugeth->stats.rx_bytes += length;
3743 /* Send the packet up the stack */
3744#ifdef CONFIG_UGETH_NAPI
3745 netif_receive_skb(skb);
3746#else
3747 netif_rx(skb);
3748#endif /* CONFIG_UGETH_NAPI */
3749 }
3750
3751 ugeth->dev->last_rx = jiffies;
3752
3753 skb = get_new_skb(ugeth, bd);
3754 if (!skb) {
3755 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3756 spin_unlock(&ugeth->lock);
3757 ugeth->stats.rx_dropped++;
3758 break;
3759 }
3760
3761 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3762
3763 /* update to point at the next skb */
3764 ugeth->skb_currx[rxQ] =
3765 (ugeth->skb_currx[rxQ] +
3766 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3767
3768 if (bd_status & R_W)
3769 bd = ugeth->p_rx_bd_ring[rxQ];
3770 else
3771 bd += UCC_GETH_SIZE_OF_BD;
3772
3773 bd_status = BD_STATUS_AND_LENGTH(bd);
3774 }
3775
3776 ugeth->rxBd[rxQ] = bd;
3777 spin_unlock(&ugeth->lock);
3778 return howmany;
3779}
3780
3781static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3782{
3783 /* Start from the next BD that should be filled */
3784 ucc_geth_private_t *ugeth = netdev_priv(dev);
3785 u8 *bd; /* BD pointer */
3786 u32 bd_status;
3787
3788 bd = ugeth->confBd[txQ];
3789 bd_status = BD_STATUS_AND_LENGTH(bd);
3790
3791 /* Normal processing. */
3792 while ((bd_status & T_R) == 0) {
3793 /* BD contains already transmitted buffer. */
3794 /* Handle the transmitted buffer and release */
3795 /* the BD to be used with the current frame */
3796
3797 		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3798 break;
3799
3800 ugeth->stats.tx_packets++;
3801
3802 /* Free the sk buffer associated with this TxBD */
3803 dev_kfree_skb_irq(ugeth->
3804 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3805 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3806 ugeth->skb_dirtytx[txQ] =
3807 (ugeth->skb_dirtytx[txQ] +
3808 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3809
3810 /* We freed a buffer, so now we can restart transmission */
3811 if (netif_queue_stopped(dev))
3812 netif_wake_queue(dev);
3813
3814 /* Advance the confirmation BD pointer */
3815 if (!(bd_status & T_W))
3816 ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
3817 else
3818 ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
3819 }
3820 return 0;
3821}
3822
3823#ifdef CONFIG_UGETH_NAPI
3824static int ucc_geth_poll(struct net_device *dev, int *budget)
3825{
3826 ucc_geth_private_t *ugeth = netdev_priv(dev);
3827 int howmany;
3828 int rx_work_limit = *budget;
3829 u8 rxQ = 0;
3830
3831 if (rx_work_limit > dev->quota)
3832 rx_work_limit = dev->quota;
3833
3834 howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);
3835
3836 dev->quota -= howmany;
3837 rx_work_limit -= howmany;
3838 *budget -= howmany;
3839
3840 if (rx_work_limit >= 0)
3841 netif_rx_complete(dev);
3842
3843 return (rx_work_limit < 0) ? 1 : 0;
3844}
3845#endif /* CONFIG_UGETH_NAPI */
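/* The poll routine above follows the 2.6.x NAPI contract: consume at most
 * min(*budget, dev->quota) received frames, decrement both counters by the
 * work actually done, call netif_rx_complete() once the queue is drained,
 * and return nonzero only while receive work remains. */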
3846
3847static irqreturn_t ucc_geth_irq_handler(int irq, void *info,
3848 struct pt_regs *regs)
3849{
3850 struct net_device *dev = (struct net_device *)info;
3851 ucc_geth_private_t *ugeth = netdev_priv(dev);
3852 ucc_fast_private_t *uccf;
3853 ucc_geth_info_t *ug_info;
3854 register u32 ucce = 0;
3855 register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
3856 register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
3857 register u8 i;
3858
3859 ugeth_vdbg("%s: IN", __FUNCTION__);
3860
3861 if (!ugeth)
3862 return IRQ_NONE;
3863
3864 uccf = ugeth->uccf;
3865 ug_info = ugeth->ug_info;
3866
3867 	do {
		/* restart the per-queue event scan from queue 0 on each pass */
		bit_mask = UCCE_RXBF_SINGLE_MASK;
		tx_mask = UCCE_TXBF_SINGLE_MASK;
3868 ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));
3869
3870 /* clear event bits for next time */
3871 /* Side effect here is to mask ucce variable
3872 for future processing below. */
3873 out_be32(uccf->p_ucce, ucce); /* Clear with ones,
3874 but only bits in UCCM */
3875
3876 		/* Service Rx frame events first, then reclaim completed
3877 		Tx buffers for each queue whose TXB event is set */
3878
3879 for (i = 0; i < ug_info->numQueuesRx; i++) {
3880 if (ucce & bit_mask)
3881 ucc_geth_rx(ugeth, i,
3882 (int)ugeth->ug_info->
3883 bdRingLenRx[i]);
3884 ucce &= ~bit_mask;
3885 bit_mask <<= 1;
3886 }
3887
3888 for (i = 0; i < ug_info->numQueuesTx; i++) {
3889 if (ucce & tx_mask)
3890 ucc_geth_tx(dev, i);
3891 ucce &= ~tx_mask;
3892 tx_mask <<= 1;
3893 }
3894
3895 /* Exceptions */
3896 if (ucce & UCCE_BSY) {
3897 ugeth_vdbg("Got BUSY irq!!!!");
3898 ugeth->stats.rx_errors++;
3899 ucce &= ~UCCE_BSY;
3900 }
3901 if (ucce & UCCE_OTHER) {
3902 ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
3903 ucce);
3904 ugeth->stats.rx_errors++;
3905 			ucce = 0;	/* all remaining events were just handled */
3906 }
3907 }
3908 while (ucce);
3909
3910 return IRQ_HANDLED;
3911}
3912
3913static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3914{
3915 struct net_device *dev = (struct net_device *)dev_id;
3916 ucc_geth_private_t *ugeth = netdev_priv(dev);
3917
3918 ugeth_vdbg("%s: IN", __FUNCTION__);
3919
3920 /* Clear the interrupt */
3921 mii_clear_phy_interrupt(ugeth->mii_info);
3922
3923 /* Disable PHY interrupts */
3924 mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);
3925
3926 /* Schedule the phy change */
3927 schedule_work(&ugeth->tq);
3928
3929 return IRQ_HANDLED;
3930}
3931
3932/* Scheduled by the phy_interrupt/timer to handle PHY changes */
3933static void ugeth_phy_change(void *data)
3934{
3935 struct net_device *dev = (struct net_device *)data;
3936 ucc_geth_private_t *ugeth = netdev_priv(dev);
3937 ucc_geth_t *ug_regs;
3938 int result = 0;
3939
3940 ugeth_vdbg("%s: IN", __FUNCTION__);
3941
3942 ug_regs = ugeth->ug_regs;
3943
3944 /* Delay to give the PHY a chance to change the
3945 * register state */
3946 msleep(1);
3947
3948 /* Update the link, speed, duplex */
3949 result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);
3950
3951 /* Adjust the known status as long as the link
3952 * isn't still coming up */
3953 if ((0 == result) || (ugeth->mii_info->link == 0))
3954 adjust_link(dev);
3955
3956 /* Reenable interrupts, if needed */
3957 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
3958 mii_configure_phy_interrupt(ugeth->mii_info,
3959 MII_INTERRUPT_ENABLED);
3960}
3961
3962/* Called every so often on systems that don't interrupt
3963 * the core for PHY changes */
3964static void ugeth_phy_timer(unsigned long data)
3965{
3966 struct net_device *dev = (struct net_device *)data;
3967 ucc_geth_private_t *ugeth = netdev_priv(dev);
3968
3969 schedule_work(&ugeth->tq);
3970
3971 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
3972}
3973
3974/* Keep trying aneg for some time
3975 * If, after UGETH_AN_TIMEOUT seconds, it has not
3976 * finished, we switch to forced.
3977 * Either way, once the process has completed, we either
3978 * request the interrupt, or switch the timer over to
3979 * using ugeth_phy_timer to check status */
3980static void ugeth_phy_startup_timer(unsigned long data)
3981{
3982 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
3983 ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
3984 static int secondary = UGETH_AN_TIMEOUT;
3985 int result;
3986
3987 /* Configure the Auto-negotiation */
3988 result = mii_info->phyinfo->config_aneg(mii_info);
3989
3990 /* If autonegotiation failed to start, and
3991 * we haven't timed out, reset the timer, and return */
3992 if (result && secondary--) {
3993 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
3994 return;
3995 } else if (result) {
3996 /* Couldn't start autonegotiation.
3997 * Try switching to forced */
3998 mii_info->autoneg = 0;
3999 result = mii_info->phyinfo->config_aneg(mii_info);
4000
4001 /* Forcing failed! Give up */
4002 if (result) {
4003 ugeth_err("%s: Forcing failed!", mii_info->dev->name);
4004 return;
4005 }
4006 }
4007
4008 	/* Kill the timer so it can be restarted; use del_timer, not _sync, since we are inside this timer's own handler */
4009 	del_timer(&ugeth->phy_info_timer);
4010
4011 /* Grab the PHY interrupt, if necessary/possible */
4012 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
4013 if (request_irq(ugeth->ug_info->phy_interrupt,
4014 phy_interrupt,
4015 SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) {
4016 ugeth_err("%s: Can't get IRQ %d (PHY)",
4017 mii_info->dev->name,
4018 ugeth->ug_info->phy_interrupt);
4019 } else {
4020 mii_configure_phy_interrupt(ugeth->mii_info,
4021 MII_INTERRUPT_ENABLED);
4022 return;
4023 }
4024 }
4025
4026 /* Start the timer again, this time in order to
4027 * handle a change in status */
4028 init_timer(&ugeth->phy_info_timer);
4029 ugeth->phy_info_timer.function = &ugeth_phy_timer;
4030 ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
4031 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
4032}
4033
4034/* Called when something needs to use the ethernet device */
4035/* Returns 0 for success. */
4036static int ucc_geth_open(struct net_device *dev)
4037{
4038 ucc_geth_private_t *ugeth = netdev_priv(dev);
4039 int err;
4040
4041 ugeth_vdbg("%s: IN", __FUNCTION__);
4042
4043 /* Test station address */
4044 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
4045 ugeth_err("%s: Multicast address used for station address"
4046 " - is this what you wanted?", __FUNCTION__);
4047 return -EINVAL;
4048 }
4049
4050 err = ucc_geth_startup(ugeth);
4051 if (err) {
4052 ugeth_err("%s: Cannot configure net device, aborting.",
4053 dev->name);
4054 return err;
4055 }
4056
4057 err = adjust_enet_interface(ugeth);
4058 if (err) {
4059 ugeth_err("%s: Cannot configure net device, aborting.",
4060 dev->name);
4061 return err;
4062 }
4063
4064 /* Set MACSTNADDR1, MACSTNADDR2 */
4065 /* For more details see the hardware spec. */
4066 init_mac_station_addr_regs(dev->dev_addr[0],
4067 dev->dev_addr[1],
4068 dev->dev_addr[2],
4069 dev->dev_addr[3],
4070 dev->dev_addr[4],
4071 dev->dev_addr[5],
4072 &ugeth->ug_regs->macstnaddr1,
4073 &ugeth->ug_regs->macstnaddr2);
4074
4075 err = init_phy(dev);
4076 if (err) {
4077 		ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
4078 return err;
4079 }
4080#ifndef CONFIG_UGETH_NAPI
4081 err =
4082 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
4083 "UCC Geth", dev);
4084 if (err) {
4085 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
4086 dev->name);
4087 ucc_geth_stop(ugeth);
4088 return err;
4089 }
4090#endif /* CONFIG_UGETH_NAPI */
4091
4092 /* Set up the PHY change work queue */
4093 INIT_WORK(&ugeth->tq, ugeth_phy_change, dev);
4094
4095 init_timer(&ugeth->phy_info_timer);
4096 ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
4097 ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
4098 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
4099
4100 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
4101 if (err) {
4102 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
4103 ucc_geth_stop(ugeth);
4104 return err;
4105 }
4106
4107 netif_start_queue(dev);
4108
4109 return err;
4110}
4111
4112/* Stops the kernel queue, and halts the controller */
4113static int ucc_geth_close(struct net_device *dev)
4114{
4115 ucc_geth_private_t *ugeth = netdev_priv(dev);
4116
4117 ugeth_vdbg("%s: IN", __FUNCTION__);
4118
4119 ucc_geth_stop(ugeth);
4120
4121 /* Shutdown the PHY */
4122 if (ugeth->mii_info->phyinfo->close)
4123 ugeth->mii_info->phyinfo->close(ugeth->mii_info);
4124
4125 kfree(ugeth->mii_info);
4126
4127 netif_stop_queue(dev);
4128
4129 return 0;
4130}
4131
4132struct ethtool_ops ucc_geth_ethtool_ops = {
4133 .get_settings = NULL,
4134 .get_drvinfo = NULL,
4135 .get_regs_len = NULL,
4136 .get_regs = NULL,
4137 .get_link = NULL,
4138 .get_coalesce = NULL,
4139 .set_coalesce = NULL,
4140 .get_ringparam = NULL,
4141 .set_ringparam = NULL,
4142 .get_strings = NULL,
4143 .get_stats_count = NULL,
4144 .get_ethtool_stats = NULL,
4145};
4146
4147static int ucc_geth_probe(struct device *device)
4148{
4149 struct platform_device *pdev = to_platform_device(device);
4150 struct ucc_geth_platform_data *ugeth_pdata;
4151 struct net_device *dev = NULL;
4152 struct ucc_geth_private *ugeth = NULL;
4153 struct ucc_geth_info *ug_info;
4154 int err;
4155 static int mii_mng_configured = 0;
4156
4157 ugeth_vdbg("%s: IN", __FUNCTION__);
4158
4159 	ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;
4160 	/* check for missing platform data before it is dereferenced below */
4161 	if (ugeth_pdata == NULL) {
4162 		ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
4163 			pdev->id);
4164 		return -ENODEV;
4165 	}
4166
4167 	ug_info = &ugeth_info[pdev->id];
4168 	ug_info->uf_info.ucc_num = pdev->id;
4169 	ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
4170 	ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
4171 	ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
4172 	ug_info->uf_info.irq = platform_get_irq(pdev, 0);
4173 	ug_info->phy_address = ugeth_pdata->phy_id;
4174 	ug_info->enet_interface = ugeth_pdata->phy_interface;
4175 	ug_info->board_flags = ugeth_pdata->board_flags;
4176 	ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;
4177
4178 	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
4179 		ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
4180 		ug_info->uf_info.irq);
4181
4182 if (!mii_mng_configured) {
4183 ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
4184 mii_mng_configured = 1;
4185 }
4186
4187 /* Create an ethernet device instance */
4188 dev = alloc_etherdev(sizeof(*ugeth));
4189
4190 if (dev == NULL)
4191 return -ENOMEM;
4192
4193 ugeth = netdev_priv(dev);
4194 spin_lock_init(&ugeth->lock);
4195
4196 dev_set_drvdata(device, dev);
4197
4198 /* Set the dev->base_addr to the gfar reg region */
4199 dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
4200
4201 SET_MODULE_OWNER(dev);
4202 SET_NETDEV_DEV(dev, device);
4203
4204 /* Fill in the dev structure */
4205 dev->open = ucc_geth_open;
4206 dev->hard_start_xmit = ucc_geth_start_xmit;
4207 dev->tx_timeout = ucc_geth_timeout;
4208 dev->watchdog_timeo = TX_TIMEOUT;
4209#ifdef CONFIG_UGETH_NAPI
4210 dev->poll = ucc_geth_poll;
4211 dev->weight = UCC_GETH_DEV_WEIGHT;
4212#endif /* CONFIG_UGETH_NAPI */
4213 dev->stop = ucc_geth_close;
4214 dev->get_stats = ucc_geth_get_stats;
4215// dev->change_mtu = ucc_geth_change_mtu;
4216 dev->mtu = 1500;
4217 dev->set_multicast_list = ucc_geth_set_multi;
4218 dev->ethtool_ops = &ucc_geth_ethtool_ops;
4219
4220 	ugeth->ug_info = ug_info;
4221 	ugeth->dev = dev;
4222 	memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);
4223
4224 	err = register_netdev(dev);
4225 	if (err) {
4226 		ugeth_err("%s: Cannot register net device, aborting.",
4227 			dev->name);
4228 		free_netdev(dev);
4229 		return err;
4230 	}
4231
4232 	return 0;
4233}
4234
4235static int ucc_geth_remove(struct device *device)
4236{
4237 struct net_device *dev = dev_get_drvdata(device);
4238 struct ucc_geth_private *ugeth = netdev_priv(dev);
4239
4240 dev_set_drvdata(device, NULL);
4241 ucc_geth_memclean(ugeth);
4242 free_netdev(dev);
4243
4244 return 0;
4245}
4246
4247/* Structure for a device driver */
4248static struct device_driver ucc_geth_driver = {
4249 .name = DRV_NAME,
4250 .bus = &platform_bus_type,
4251 .probe = ucc_geth_probe,
4252 .remove = ucc_geth_remove,
4253};
4254
4255static int __init ucc_geth_init(void)
4256{
4257 int i;
4258 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
4259 for (i = 0; i < 8; i++)
4260 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
4261 sizeof(ugeth_primary_info));
4262
4263 return driver_register(&ucc_geth_driver);
4264}
4265
4266static void __exit ucc_geth_exit(void)
4267{
4268 driver_unregister(&ucc_geth_driver);
4269}
4270
4271module_init(ucc_geth_init);
4272module_exit(ucc_geth_exit);
4273
4274MODULE_AUTHOR("Freescale Semiconductor, Inc");
4275MODULE_DESCRIPTION(DRV_DESC);
4276MODULE_LICENSE("GPL");
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
new file mode 100644
index 000000000000..005965f5dd9b
--- /dev/null
+++ b/drivers/net/ucc_geth.h
@@ -0,0 +1,1339 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * Internal header file for UCC Gigabit Ethernet unit routines.
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#ifndef __UCC_GETH_H__
19#define __UCC_GETH_H__
20
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/fsl_devices.h>
24
25#include <asm/immap_qe.h>
26#include <asm/qe.h>
27
28#include <asm/ucc.h>
29#include <asm/ucc_fast.h>
30
31#define NUM_TX_QUEUES 8
32#define NUM_RX_QUEUES 8
33#define NUM_BDS_IN_PREFETCHED_BDS 4
34#define TX_IP_OFFSET_ENTRY_MAX 8
35#define NUM_OF_PADDRS 4
36#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
37#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
38
39typedef struct ucc_mii_mng {
40 u32 miimcfg; /* MII management configuration reg */
41 u32 miimcom; /* MII management command reg */
42 u32 miimadd; /* MII management address reg */
43 u32 miimcon; /* MII management control reg */
44 u32 miimstat; /* MII management status reg */
45 u32 miimind; /* MII management indication reg */
46} __attribute__ ((packed)) ucc_mii_mng_t;
47
48typedef struct ucc_geth {
49 ucc_fast_t uccf;
50
51 u32 maccfg1; /* mac configuration reg. 1 */
52 u32 maccfg2; /* mac configuration reg. 2 */
53 u32 ipgifg; /* interframe gap reg. */
54 u32 hafdup; /* half-duplex reg. */
55 u8 res1[0x10];
56 ucc_mii_mng_t miimng; /* MII management structure */
57 u32 ifctl; /* interface control reg */
58 	u32 ifstat; /* interface status reg */
59 u32 macstnaddr1; /* mac station address part 1 reg */
60 u32 macstnaddr2; /* mac station address part 2 reg */
61 u8 res2[0x8];
62 u32 uempr; /* UCC Ethernet Mac parameter reg */
63 u32 utbipar; /* UCC tbi address reg */
64 u16 uescr; /* UCC Ethernet statistics control reg */
65 u8 res3[0x180 - 0x15A];
66 u32 tx64; /* Total number of frames (including bad
67 frames) transmitted that were exactly of the
68 	minimal length (64 for untagged, 68 for
69 tagged, or with length exactly equal to the
70 parameter MINLength */
71 u32 tx127; /* Total number of frames (including bad
72 frames) transmitted that were between
73 MINLength (Including FCS length==4) and 127
74 octets */
75 u32 tx255; /* Total number of frames (including bad
76 frames) transmitted that were between 128
77 (Including FCS length==4) and 255 octets */
78 u32 rx64; /* Total number of frames received including
79 	bad frames that were exactly of the minimal
80 length (64 bytes) */
81 u32 rx127; /* Total number of frames (including bad
82 frames) received that were between MINLength
83 (Including FCS length==4) and 127 octets */
84 u32 rx255; /* Total number of frames (including bad
85 frames) received that were between 128
86 (Including FCS length==4) and 255 octets */
87 u32 txok; /* Total number of octets residing in frames
88 	that were involved in successful
89 transmission */
90 u16 txcf; /* Total number of PAUSE control frames
91 transmitted by this MAC */
92 u8 res4[0x2];
93 u32 tmca; /* Total number of frames that were transmitted
94 	successfully with the group address bit set
95 that are not broadcast frames */
96 u32 tbca; /* Total number of frames transmitted
97 	successfully that had destination address
98 field equal to the broadcast address */
99 u32 rxfok; /* Total number of frames received OK */
100 u32 rxbok; /* Total number of octets received OK */
101 u32 rbyt; /* Total number of octets received including
102 octets in bad frames. Must be implemented in
103 HW because it includes octets in frames that
104 never even reach the UCC */
105 u32 rmca; /* Total number of frames that were received
106 	successfully with the group address bit set
107 that are not broadcast frames */
108 	u32 rbca; /* Total number of frames received successfully
109 that had destination address equal to the
110 broadcast address */
111 u32 scar; /* Statistics carry register */
112 	u32 scam; /* Statistics carry mask register */
113 u8 res5[0x200 - 0x1c4];
114} __attribute__ ((packed)) ucc_geth_t;
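/* The res* pads are sized as differences of target offsets (res3[0x180 -
 * 0x15A], res5[0x200 - 0x1c4]), which by those expressions places the RMON
 * counter block at offset 0x180 and closes the register map at 0x200
 * bytes. */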
115
116/* UCC GETH TEMODER Register */
117#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
118 */
119#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */
120#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4
121 checksums */
122#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance
123 optimization
124 enhancement (mode1) */
125#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics
126 */
127#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues <<
128 shift */
129
130/* UCC GETH REMODER Register */
131#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx
132 statistics */
133#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable
134 extended
135 features */
136#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation
137 tagged << shift */
138#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non
139 tagged << shift */
140#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift
141 */
142#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx
143 statistics */
144#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended
145 filtering
146 vs.
147 mpc82xx-like
148 filtering */
149#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues <<
150 shift */
151#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable
152 dynamic max
153 frame length
154 */
155#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable
156 dynamic min
157 frame length
158 */
159#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4
160 checksums */
161#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip
162 address to
163 4-byte
164 boundary */
165
166/* UCC GETH Event Register */
167#define UCCE_MPD 0x80000000 /* Magic packet
168 detection */
169#define UCCE_SCAR 0x40000000
170#define UCCE_GRA 0x20000000 /* Tx graceful
171 stop
172 complete */
173#define UCCE_CBPR 0x10000000
174#define UCCE_BSY 0x08000000
175#define UCCE_RXC 0x04000000
176#define UCCE_TXC 0x02000000
177#define UCCE_TXE 0x01000000
178#define UCCE_TXB7 0x00800000
179#define UCCE_TXB6 0x00400000
180#define UCCE_TXB5 0x00200000
181#define UCCE_TXB4 0x00100000
182#define UCCE_TXB3 0x00080000
183#define UCCE_TXB2 0x00040000
184#define UCCE_TXB1 0x00020000
185#define UCCE_TXB0 0x00010000
186#define UCCE_RXB7 0x00008000
187#define UCCE_RXB6 0x00004000
188#define UCCE_RXB5 0x00002000
189#define UCCE_RXB4 0x00001000
190#define UCCE_RXB3 0x00000800
191#define UCCE_RXB2 0x00000400
192#define UCCE_RXB1 0x00000200
193#define UCCE_RXB0 0x00000100
194#define UCCE_RXF7 0x00000080
195#define UCCE_RXF6 0x00000040
196#define UCCE_RXF5 0x00000020
197#define UCCE_RXF4 0x00000010
198#define UCCE_RXF3 0x00000008
199#define UCCE_RXF2 0x00000004
200#define UCCE_RXF1 0x00000002
201#define UCCE_RXF0 0x00000001
202
203#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0)
204#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0)
205
206#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\
207 UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
208#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\
209 UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
210#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\
211 UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
212#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\
213 UCCE_RXC | UCCE_TXC | UCCE_TXE)
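/* UCCE_RXBF_SINGLE_MASK and UCCE_TXBF_SINGLE_MASK are the queue-0 event
 * bits; ucc_geth.c shifts them left by the queue index to build the
 * per-queue interrupt mask and to test events for queues 1-7. */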
214
215/* UCC GETH UPSMR (Protocol Specific Mode Register) */
216#define UPSMR_ECM 0x04000000 /* Enable CAM
217 Miss or
218 Enable
219 Filtering
220 Miss */
221#define UPSMR_HSE 0x02000000 /* Hardware
222 Statistics
223 Enable */
224#define UPSMR_PRO 0x00400000 /* Promiscuous*/
225#define UPSMR_CAP 0x00200000 /* CAM polarity
226 */
227#define UPSMR_RSH 0x00100000 /* Receive
228 Short Frames
229 */
230#define UPSMR_RPM 0x00080000 /* Reduced Pin
231 Mode
232 interfaces */
233#define UPSMR_R10M 0x00040000 /* RGMII/RMII
234 10 Mode */
235#define UPSMR_RLPB 0x00020000 /* RMII
236 Loopback
237 Mode */
238#define UPSMR_TBIM 0x00010000 /* Ten-bit
239 Interface
240 Mode */
241#define UPSMR_RMM 0x00001000 /* RMII/RGMII
242 Mode */
243#define UPSMR_CAM 0x00000400 /* CAM Address
244 Matching */
245#define UPSMR_BRO 0x00000200 /* Broadcast
246 Address */
247#define UPSMR_RES1 0x00002000 /* Reserved
248                                                                    field - must
249 be 1 */
250
251/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
252#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
253 Rx */
254#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control
255 Tx */
256#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable
257 synchronized
258 to Rx stream
259 */
260#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
261#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable
262 synchronized
263 to Tx stream
264 */
265#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
266
267/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */
268#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble
269 Length <<
270 shift */
271#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble
272 Length mask */
273#define MACCFG2_SRP 0x00000080 /* Soft Receive
274 Preamble */
275#define MACCFG2_STP 0x00000040 /* Soft
276 Transmit
277 Preamble */
278#define MACCFG2_RESERVED_1 0x00000020 /* Reserved -
279 must be set
280 to 1 */
281#define MACCFG2_LC 0x00000010 /* Length Check
282 */
283#define MACCFG2_MPE 0x00000008 /* Magic packet
284 detect */
285#define MACCFG2_FDX 0x00000001 /* Full Duplex */
286#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex
287 mask */
288#define MACCFG2_PAD_CRC 0x00000004
289#define MACCFG2_CRC_EN 0x00000002
290#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither
291 Padding
292 short frames
293 nor CRC */
294#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC
295 only */
296#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
297#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode
298 (MII/RMII/RGMII
299 10/100bps) */
300#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode
301 (GMII/TBI/RTB/RGMII
302 1000bps ) */
303#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask
304 covering all
305 relevant
306 bits */
307
308/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */
309#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non
310 back-to-back
311 inter frame
312 gap part 1.
313 << shift */
314#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non
315 back-to-back
316 inter frame
317 gap part 2.
318 << shift */
319#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT    (31 - 23)  /* Minimum IFG
320 Enforcement
321 << shift */
322#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back
323 inter frame
324 gap << shift
325 */
326#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back
327 inter frame gap part
328 1. max val */
329#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back
330 inter frame gap part
331 2. max val */
332#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX      255     /* Minimum IFG
333 Enforcement max val */
334#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter
335 frame gap max val */
336#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000
337#define IPGIFG_NBTB_IPG_MASK 0x007F0000
338#define IPGIFG_MIN_IFG_MASK 0x0000FF00
339#define IPGIFG_BTB_IPG_MASK 0x0000007F
340
341/* UCC GETH HAFDUP (Half Duplex Register) */
342#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate
343 Binary
344 Exponential
345 Backoff
346 Truncation
347 << shift */
348#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary
349 Exponential Backoff
350 Truncation max val */
351#define HALFDUP_ALT_BEB 0x00080000 /* Alternate
352 Binary
353 Exponential
354 Backoff */
355#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back
356 pressure no
357 backoff */
358#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */
359#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive
360 Defer */
361#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum
362 Retransmission
363 << shift */
364#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum
365 Retransmission max
366 val */
367#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision
368 Window <<
369 shift */
370#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max
371 val */
372#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000
373#define HALFDUP_RETRANS_MASK 0x0000F000
374#define HALFDUP_COL_WINDOW_MASK 0x0000003F
375
376/* UCC GETH UCCS (Ethernet Status Register) */
377#define UCCS_BPR 0x02 /* Back pressure (in
378 half duplex mode) */
379#define UCCS_PAU 0x02 /* Pause state (in full
380 duplex mode) */
381#define UCCS_MPD 0x01 /* Magic Packet
382 Detected */
383
384/* UCC GETH MIIMCFG (MII Management Configuration Register) */
385#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset
386 management */
387#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble
388 suppress */
389#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide
390 << shift */
391#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val
392 */
393#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */
394#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */
395#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */
396#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */
397#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10
398 */
399#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14
400 */
401#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16
402 */
403#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20
404 */
405#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28
406 */
407#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32
408 */
409#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48
410 */
411#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64
412 */
413#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80
414 */
415#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by
416 112 */
417#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by
418 160 */
419#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by
420 224 */
421
422/* UCC GETH MIIMCOM (MII Management Command Register) */
423#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */
424#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */
425
426/* UCC GETH MIIMADD (MII Management Address Register) */
427#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address
428 << shift */
429#define MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register
430 << shift */
431
432/* UCC GETH MIIMCON (MII Management Control Register) */
433#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control
434 << shift */
435#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status
436 << shift */
437
438/* UCC GETH MIIMIND (MII Management Indicator Register) */
439#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */
440#define MIIMIND_SCAN 0x00000002 /* Scan in
441 progress */
442#define MIIMIND_BUSY 0x00000001
443
444/* UCC GETH IFSTAT (Interface Status Register) */
445#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive
446 transmission
447 defer */
448
449/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */
450#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station
451 address 6th
452 octet <<
453 shift */
454#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station
455 address 5th
456 octet <<
457 shift */
458#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station
459 address 4th
460 octet <<
461 shift */
462#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station
463 address 3rd
464 octet <<
465 shift */
466
467/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */
468#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station
469 address 2nd
470 octet <<
471 shift */
472#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station
473 address 1st
474 octet <<
475 shift */
476
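MACSTNADDR1/2 hold the six station-address octets in reverse order, which the shift macros above encode. A sketch of programming them from a six-byte array mac[], assuming ug_regs points at the mapped register block:

	out_be32(&ug_regs->macstnaddr1,
		 ((u32) mac[5] << MACSTNADDR1_OCTET_6_SHIFT) |
		 ((u32) mac[4] << MACSTNADDR1_OCTET_5_SHIFT) |
		 ((u32) mac[3] << MACSTNADDR1_OCTET_4_SHIFT) |
		 ((u32) mac[2] << MACSTNADDR1_OCTET_3_SHIFT));
	out_be32(&ug_regs->macstnaddr2,
		 ((u32) mac[1] << MACSTNADDR2_OCTET_2_SHIFT) |
		 ((u32) mac[0] << MACSTNADDR2_OCTET_1_SHIFT));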
477/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */
478#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time
479 value <<
480 shift */
481#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended
482 pause time
483 value <<
484 shift */
485
486/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */
487#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address
488 << shift */
489#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address
490 mask */
491
492/* UCC GETH UESCR (Ethernet Statistics Control Register) */
493#define UESCR_AUTOZ 0x8000 /* Automatically zero
494 addressed
495 statistical counter
496 values */
497#define UESCR_CLRCNT 0x4000 /* Clear all statistics
498 counters */
499#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max
500 Coalescing
501 Value <<
502 shift */
503#define UESCR_SCOV_SHIFT (15 - 15) /* Status
504 Coalescing
505 Value <<
506 shift */
507
508/* UCC GETH UDSR (Data Synchronization Register) */
509#define UDSR_MAGIC 0x067E
510
511typedef struct ucc_geth_thread_data_tx {
512 u8 res0[104];
513} __attribute__ ((packed)) ucc_geth_thread_data_tx_t;
514
515typedef struct ucc_geth_thread_data_rx {
516 u8 res0[40];
517} __attribute__ ((packed)) ucc_geth_thread_data_rx_t;
518
519/* Send Queue Queue-Descriptor */
520typedef struct ucc_geth_send_queue_qd {
521 u32 bd_ring_base; /* pointer to BD ring base address */
522 u8 res0[0x8];
523 u32 last_bd_completed_address;/* initialize to last entry in BD ring */
524 u8 res1[0x30];
525} __attribute__ ((packed)) ucc_geth_send_queue_qd_t;
526
527typedef struct ucc_geth_send_queue_mem_region {
528 ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES];
529} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t;
530
531typedef struct ucc_geth_thread_tx_pram {
532 u8 res0[64];
533} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t;
534
535typedef struct ucc_geth_thread_rx_pram {
536 u8 res0[128];
537} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t;
538
539#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
540#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
541#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
542
543typedef struct ucc_geth_scheduler {
544 u16 cpucount0; /* CPU packet counter */
545 u16 cpucount1; /* CPU packet counter */
546 u16 cecount0; /* QE packet counter */
547 u16 cecount1; /* QE packet counter */
548 u16 cpucount2; /* CPU packet counter */
549 u16 cpucount3; /* CPU packet counter */
550 u16 cecount2; /* QE packet counter */
551 u16 cecount3; /* QE packet counter */
552 u16 cpucount4; /* CPU packet counter */
553 u16 cpucount5; /* CPU packet counter */
554 u16 cecount4; /* QE packet counter */
555 u16 cecount5; /* QE packet counter */
556 u16 cpucount6; /* CPU packet counter */
557 u16 cpucount7; /* CPU packet counter */
558 u16 cecount6; /* QE packet counter */
559 u16 cecount7; /* QE packet counter */
560 u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */
561 u32 rtsrshadow; /* temporary variable handled by QE */
562 u32 time; /* temporary variable handled by QE */
563 u32 ttl; /* temporary variable handled by QE */
564 u32 mblinterval; /* max burst length interval */
565 u16 nortsrbytetime; /* normalized value of byte time in tsr units */
566 u8 fracsiz; /* radix 2 log value of denom. of
567 NorTSRByteTime */
568 u8 res0[1];
569 u8 strictpriorityq; /* Strict Priority Mask register */
570 u8 txasap; /* Transmit ASAP register */
571 u8 extrabw; /* Extra BandWidth register */
572 u8 oldwfqmask; /* temporary variable handled by QE */
573 u8 weightfactor[NUM_TX_QUEUES];
574 /**< weight factor for queues */
575 u32 minw; /* temporary variable handled by QE */
576 u8 res1[0x70 - 0x64];
577} __attribute__ ((packed)) ucc_geth_scheduler_t;
578
579typedef struct ucc_geth_tx_firmware_statistics_pram {
580 u32 sicoltx; /* single collision */
581 u32 mulcoltx; /* multiple collision */
582 u32 latecoltxfr; /* late collision */
583 u32 frabortduecol; /* frames aborted due to transmit collision */
584 u32 frlostinmactxer; /* frames lost due to internal MAC error
585 transmission that are not counted on any
586 other counter */
587 u32 carriersenseertx; /* carrier sense error */
588 u32 frtxok; /* frames transmitted OK */
589	u32 txfrexcessivedefer;	/* frames with deferral time greater than
590 specified threshold */
591 u32 txpkts256; /* total packets (including bad) between 256
592 and 511 octets */
593 u32 txpkts512; /* total packets (including bad) between 512
594 and 1023 octets */
595 u32 txpkts1024; /* total packets (including bad) between 1024
596 and 1518 octets */
597 u32 txpktsjumbo; /* total packets (including bad) between 1024
598 and MAXLength octets */
599} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t;
600
601typedef struct ucc_geth_rx_firmware_statistics_pram {
602 u32 frrxfcser; /* frames with crc error */
603 u32 fraligner; /* frames with alignment error */
604 u32 inrangelenrxer; /* in range length error */
605 u32 outrangelenrxer; /* out of range length error */
606 u32 frtoolong; /* frame too long */
607 u32 runt; /* runt */
608 u32 verylongevent; /* very long event */
609 u32 symbolerror; /* symbol error */
610 u32 dropbsy; /* drop because of BD not ready */
611 u8 res0[0x8];
612 u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
613 or type mismatch) */
614 u32 underpkts; /* total frames less than 64 octets */
615 u32 pkts256; /* total frames (including bad) between 256 and
616 511 octets */
617 u32 pkts512; /* total frames (including bad) between 512 and
618 1023 octets */
619 u32 pkts1024; /* total frames (including bad) between 1024
620 and 1518 octets */
621 u32 pktsjumbo; /* total frames (including bad) between 1024
622 and MAXLength octets */
623 u32 frlossinmacer; /* frames lost because of internal MAC error
624 that is not counted in any other counter */
625 u32 pausefr; /* pause frames */
626 u8 res1[0x4];
627 u32 removevlan; /* total frames that had their VLAN tag removed
628 */
629 u32 replacevlan; /* total frames that had their VLAN tag
630 replaced */
631 u32 insertvlan; /* total frames that had their VLAN tag
632 inserted */
633} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t;
634
635typedef struct ucc_geth_rx_interrupt_coalescing_entry {
636 u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
637 value */
638 u32 interruptcoalescingcounter; /* interrupt coalescing counter,
639 initialize to
640 interruptcoalescingmaxvalue */
641} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t;
642
643typedef struct ucc_geth_rx_interrupt_coalescing_table {
644 ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES];
645 /**< interrupt coalescing entry */
646} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t;
647
648typedef struct ucc_geth_rx_prefetched_bds {
649 qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
650} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t;
651
652typedef struct ucc_geth_rx_bd_queues_entry {
653 u32 bdbaseptr; /* BD base pointer */
654 u32 bdptr; /* BD pointer */
655 u32 externalbdbaseptr; /* external BD base pointer */
656 u32 externalbdptr; /* external BD pointer */
657} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t;
658
659typedef struct ucc_geth_tx_global_pram {
660 u16 temoder;
661 u8 res0[0x38 - 0x02];
662 u32 sqptr; /* a base pointer to send queue memory region */
663 u32 schedulerbasepointer; /* a base pointer to scheduler memory
664 region */
665 u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */
666 u32 tstate; /* tx internal state. High byte contains
667 function code */
668 u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
669 u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
670 u32 tqptr; /* a base pointer to the Tx Queues Memory
671 Region */
672 u8 res2[0x80 - 0x74];
673} __attribute__ ((packed)) ucc_geth_tx_global_pram_t;
674
675/* structure representing Extended Filtering Global Parameters in PRAM */
676typedef struct ucc_geth_exf_global_pram {
677 u32 l2pcdptr; /* individual address filter, high */
678 u8 res0[0x10 - 0x04];
679} __attribute__ ((packed)) ucc_geth_exf_global_pram_t;
680
681typedef struct ucc_geth_rx_global_pram {
682 u32 remoder; /* ethernet mode reg. */
683 u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
684 u32 res0[0x1];
685 u8 res1[0x20 - 0xC];
686	u16 typeorlen;		/* cutoff point below which the type/len
687				   field is considered a length */
688 u8 res2[0x1];
689 u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/
690 u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */
691 u8 res3[0x30 - 0x28];
692 u32 intcoalescingptr; /* Interrupt coalescing table pointer */
693 u8 res4[0x36 - 0x34];
694 u8 rstate; /* rx internal state. High byte contains
695 function code */
696 u8 res5[0x46 - 0x37];
697 u16 mrblr; /* max receive buffer length reg. */
698 u32 rbdqptr; /* base pointer to RxBD parameter table
699 description */
700 u16 mflr; /* max frame length reg. */
701 u16 minflr; /* min frame length reg. */
702 u16 maxd1; /* max dma1 length reg. */
703 u16 maxd2; /* max dma2 length reg. */
704 u32 ecamptr; /* external CAM address */
705 u32 l2qt; /* VLAN priority mapping table. */
706 u32 l3qt[0x8]; /* IP priority mapping table. */
707 u16 vlantype; /* vlan type */
708 u16 vlantci; /* default vlan tci */
709 u8 addressfiltering[64]; /* address filtering data structure */
710 u32 exfGlobalParam; /* base address for extended filtering global
711 parameters */
712 u8 res6[0x100 - 0xC4]; /* Initialize to zero */
713} __attribute__ ((packed)) ucc_geth_rx_global_pram_t;
714
715#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
716
717/* structure representing InitEnet command */
718typedef struct ucc_geth_init_pram {
719 u8 resinit1;
720 u8 resinit2;
721 u8 resinit3;
722 u8 resinit4;
723 u16 resinit5;
724 u8 res1[0x1];
725 u8 largestexternallookupkeysize;
726 u32 rgftgfrxglobal;
727 u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */
728 u8 res2[0x38 - 0x30];
729 u32 txglobal; /* tx global */
730 u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
731 u8 res3[0x1];
732} __attribute__ ((packed)) ucc_geth_init_pram_t;
733
734#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
735#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
736
737#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
738#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
739#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
740#define ENET_INIT_PARAM_SNUM_SHIFT 24
741
742#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06
743#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30
744#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff
745#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00
746#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
747
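Each rxthread[]/txthread[] entry of ucc_geth_init_pram above packs a snum, a RISC allocation and a parameter-RAM pointer into one 32-bit word using the masks just defined. A sketch of the packing, with snum, risc and pram_offset assumed already validated by the caller:

	u32 entry;

	entry  = ((u32) snum << ENET_INIT_PARAM_SNUM_SHIFT) &
		 ENET_INIT_PARAM_SNUM_MASK;
	entry |= (u32) risc & ENET_INIT_PARAM_RISC_MASK;
	entry |= pram_offset & ENET_INIT_PARAM_PTR_MASK;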
748/* structure representing 82xx Address Filtering Enet Address in PRAM */
749typedef struct ucc_geth_82xx_enet_address {
750 u8 res1[0x2];
751 u16 h; /* address (MSB) */
752 u16 m; /* address */
753 u16 l; /* address (LSB) */
754} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t;
755
756/* structure representing 82xx Address Filtering PRAM */
757typedef struct ucc_geth_82xx_address_filtering_pram {
758 u32 iaddr_h; /* individual address filter, high */
759 u32 iaddr_l; /* individual address filter, low */
760 u32 gaddr_h; /* group address filter, high */
761 u32 gaddr_l; /* group address filter, low */
762 ucc_geth_82xx_enet_address_t taddr;
763 ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS];
764 u8 res0[0x40 - 0x38];
765} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t;
766
767/* GETH Tx firmware statistics structure, used when calling
768 UCC_GETH_GetStatistics. */
769typedef struct ucc_geth_tx_firmware_statistics {
770 u32 sicoltx; /* single collision */
771 u32 mulcoltx; /* multiple collision */
772 u32 latecoltxfr; /* late collision */
773 u32 frabortduecol; /* frames aborted due to transmit collision */
774 u32 frlostinmactxer; /* frames lost due to internal MAC error
775 transmission that are not counted on any
776 other counter */
777 u32 carriersenseertx; /* carrier sense error */
778 u32 frtxok; /* frames transmitted OK */
779	u32 txfrexcessivedefer;	/* frames with deferral time greater than
780 specified threshold */
781 u32 txpkts256; /* total packets (including bad) between 256
782 and 511 octets */
783 u32 txpkts512; /* total packets (including bad) between 512
784 and 1023 octets */
785 u32 txpkts1024; /* total packets (including bad) between 1024
786 and 1518 octets */
787 u32 txpktsjumbo; /* total packets (including bad) between 1024
788 and MAXLength octets */
789} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t;
790
791/* GETH Rx firmware statistics structure, used when calling
792 UCC_GETH_GetStatistics. */
793typedef struct ucc_geth_rx_firmware_statistics {
794 u32 frrxfcser; /* frames with crc error */
795 u32 fraligner; /* frames with alignment error */
796 u32 inrangelenrxer; /* in range length error */
797 u32 outrangelenrxer; /* out of range length error */
798 u32 frtoolong; /* frame too long */
799 u32 runt; /* runt */
800 u32 verylongevent; /* very long event */
801 u32 symbolerror; /* symbol error */
802 u32 dropbsy; /* drop because of BD not ready */
803 u8 res0[0x8];
804 u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
805 or type mismatch) */
806 u32 underpkts; /* total frames less than 64 octets */
807 u32 pkts256; /* total frames (including bad) between 256 and
808 511 octets */
809 u32 pkts512; /* total frames (including bad) between 512 and
810 1023 octets */
811 u32 pkts1024; /* total frames (including bad) between 1024
812 and 1518 octets */
813 u32 pktsjumbo; /* total frames (including bad) between 1024
814 and MAXLength octets */
815 u32 frlossinmacer; /* frames lost because of internal MAC error
816 that is not counted in any other counter */
817 u32 pausefr; /* pause frames */
818 u8 res1[0x4];
819 u32 removevlan; /* total frames that had their VLAN tag removed
820 */
821 u32 replacevlan; /* total frames that had their VLAN tag
822 replaced */
823 u32 insertvlan; /* total frames that had their VLAN tag
824 inserted */
825} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t;
826
827/* GETH hardware statistics structure, used when calling
828 UCC_GETH_GetStatistics. */
829typedef struct ucc_geth_hardware_statistics {
830 u32 tx64; /* Total number of frames (including bad
831 frames) transmitted that were exactly of the
832				   minimal length (64 for untagged, 68 for
833				   tagged, or with length exactly equal to the
834				   parameter MINLength) */
835 u32 tx127; /* Total number of frames (including bad
836 frames) transmitted that were between
837 MINLength (Including FCS length==4) and 127
838 octets */
839 u32 tx255; /* Total number of frames (including bad
840 frames) transmitted that were between 128
841 (Including FCS length==4) and 255 octets */
842 u32 rx64; /* Total number of frames received including
843				   bad frames that were exactly of the minimal
844 length (64 bytes) */
845 u32 rx127; /* Total number of frames (including bad
846 frames) received that were between MINLength
847 (Including FCS length==4) and 127 octets */
848 u32 rx255; /* Total number of frames (including bad
849 frames) received that were between 128
850 (Including FCS length==4) and 255 octets */
851 u32 txok; /* Total number of octets residing in frames
852				   that were involved in successful
853 transmission */
854 u16 txcf; /* Total number of PAUSE control frames
855 transmitted by this MAC */
856 u32 tmca; /* Total number of frames that were transmitted
857				   successfully with the group address bit set
858 that are not broadcast frames */
859 u32 tbca; /* Total number of frames transmitted
860				   successfully that had destination address
861 field equal to the broadcast address */
862 u32 rxfok; /* Total number of frames received OK */
863 u32 rxbok; /* Total number of octets received OK */
864 u32 rbyt; /* Total number of octets received including
865 octets in bad frames. Must be implemented in
866 HW because it includes octets in frames that
867 never even reach the UCC */
868 u32 rmca; /* Total number of frames that were received
869				   successfully with the group address bit set
870 that are not broadcast frames */
871	u32 rbca;		/* Total number of frames received successfully
872 that had destination address equal to the
873 broadcast address */
874} __attribute__ ((packed)) ucc_geth_hardware_statistics_t;
875
876/* UCC GETH Tx errors returned via TxConf callback */
877#define TX_ERRORS_DEF 0x0200
878#define TX_ERRORS_EXDEF 0x0100
879#define TX_ERRORS_LC 0x0080
880#define TX_ERRORS_RL 0x0040
881#define TX_ERRORS_RC_MASK 0x003C
882#define TX_ERRORS_RC_SHIFT 2
883#define TX_ERRORS_UN 0x0002
884#define TX_ERRORS_CSL 0x0001
885
886/* UCC GETH Rx errors returned via RxStore callback */
887#define RX_ERRORS_CMR 0x0200
888#define RX_ERRORS_M 0x0100
889#define RX_ERRORS_BC 0x0080
890#define RX_ERRORS_MC 0x0040
891
892/* Transmit BD. These are in addition to values defined in uccf. */
893#define T_VID 0x003c0000 /* insert VLAN id index mask. */
894#define T_DEF (((u32) TX_ERRORS_DEF ) << 16)
895#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16)
896#define T_LC (((u32) TX_ERRORS_LC ) << 16)
897#define T_RL (((u32) TX_ERRORS_RL ) << 16)
898#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16)
899#define T_UN (((u32) TX_ERRORS_UN ) << 16)
900#define T_CSL (((u32) TX_ERRORS_CSL ) << 16)
901#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \
902 | T_UN | T_CSL) /* transmit errors to report */
903
904/* Receive BD. These are in addition to values defined in uccf. */
905#define R_LG 0x00200000 /* Frame length violation. */
906#define R_NO 0x00100000 /* Non-octet aligned frame. */
907#define R_SH 0x00080000 /* Short frame. */
908#define R_CR 0x00040000 /* CRC error. */
909#define R_OV 0x00020000 /* Overrun. */
910#define R_IPCH 0x00010000 /* IP checksum check failed. */
911#define R_CMR (((u32) RX_ERRORS_CMR ) << 16)
912#define R_M (((u32) RX_ERRORS_M ) << 16)
913#define R_BC (((u32) RX_ERRORS_BC ) << 16)
914#define R_MC (((u32) RX_ERRORS_MC ) << 16)
915#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to
916 report */
917#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \
918 R_OV | R_IPCH) /* receive errors to discard */
919
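R_ERRORS_FATAL and R_ERRORS_REPORT split the RxBD status bits into frames that must be dropped and conditions that are merely counted. A sketch of how a receive path might apply them; bd_status stands for the 32-bit status word of a receive BD and the helpers are hypothetical:

	if (bd_status & R_ERRORS_FATAL) {
		recycle_rx_buffer(ugeth, bd);		/* frame is unusable */
	} else {
		if (bd_status & R_ERRORS_REPORT)
			count_rx_soft_error(ugeth, bd_status);
		pass_frame_up(ugeth, bd);		/* deliver the frame */
	}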
920/* Alignments */
921#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256
922#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128
923#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128
924#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64
925#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values
926 based on num of
927 threads, but always
928 using the maximum is
929 easier */
930#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
931#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
932#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
933#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
934#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a
935 guess */
936#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
937#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
938#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
939 is a
940 guess
941 */
942#define UCC_GETH_RX_BD_RING_ALIGNMENT 32
943#define UCC_GETH_TX_BD_RING_ALIGNMENT 32
944#define UCC_GETH_MRBLR_ALIGNMENT 128
945#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4
946#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
947#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64
948
949#define UCC_GETH_TAD_EF 0x80
950#define UCC_GETH_TAD_V 0x40
951#define UCC_GETH_TAD_REJ 0x20
952#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2
953#define UCC_GETH_TAD_VTAG_OP_SHIFT 6
954#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20
955#define UCC_GETH_TAD_RQOS_SHIFT 0
956#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5
957#define UCC_GETH_TAD_CFI 0x10
958
959#define UCC_GETH_VLAN_PRIORITY_MAX 8
960#define UCC_GETH_IP_PRIORITY_MAX 64
961#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
962#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
963#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
964
965#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
966
967/* Driver definitions */
968#define TX_BD_RING_LEN 0x10
969#define RX_BD_RING_LEN 0x10
970#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN
971
972#define TX_RING_MOD_MASK(size) (size-1)
973#define RX_RING_MOD_MASK(size) (size-1)
974
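TX_RING_MOD_MASK/RX_RING_MOD_MASK assume the ring length is a power of two, so advancing a ring index becomes a mask instead of a modulo. Illustrative use with the skb_curtx index kept in the private structure defined below:

	ugeth->skb_curtx[txQ] = (ugeth->skb_curtx[txQ] + 1) &
				TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);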
975#define ENET_NUM_OCTETS_PER_ADDRESS 6
976#define ENET_GROUP_ADDR 0x01 /* Group address mask
977 for ethernet
978 addresses */
979
980#define TX_TIMEOUT (1*HZ)
981#define SKB_ALLOC_TIMEOUT 100000
982#define PHY_INIT_TIMEOUT 100000
983#define PHY_CHANGE_TIME 2
984
985/* Fast Ethernet (10/100 Mbps) */
986#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
987 */
988#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
989#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
990#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
991 */
992#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
993#define UCC_GETH_UTFTT_INIT 128
994/* Gigabit Ethernet (1000 Mbps) */
995#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
996 FIFO size */
997#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
998#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
999#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
1000 FIFO size */
1001#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
1002#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
1003
1004#define UCC_GETH_REMODER_INIT 0 /* bits that must be
1005 set */
1006#define UCC_GETH_TEMODER_INIT                   0xC000  /* bits that must
1007                                                           be set */
1007#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value
1008 for this
1009 register */
1010#define UCC_GETH_MACCFG1_INIT 0
1011#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
1012#define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \
1013 (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112)
1014
1015/* Ethernet speed */
1016typedef enum enet_speed {
1017 ENET_SPEED_10BT, /* 10 Base T */
1018 ENET_SPEED_100BT, /* 100 Base T */
1019 ENET_SPEED_1000BT /* 1000 Base T */
1020} enet_speed_e;
1021
1022/* Ethernet Address Type. */
1023typedef enum enet_addr_type {
1024 ENET_ADDR_TYPE_INDIVIDUAL,
1025 ENET_ADDR_TYPE_GROUP,
1026 ENET_ADDR_TYPE_BROADCAST
1027} enet_addr_type_e;
1028
1029/* TBI / MII Set Register */
1030typedef enum enet_tbi_mii_reg {
1031 ENET_TBI_MII_CR = 0x00, /* Control (CR ) */
1032 ENET_TBI_MII_SR = 0x01, /* Status (SR ) */
1033 ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */
1034 ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability
1035 (ANLPBPA) */
1036 ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */
1037 ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */
1038 ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page
1039 (ANLPANP) */
1040 ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */
1041 ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */
1042 ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */
1043} enet_tbi_mii_reg_e;
1044
1045/* UCC GETH 82xx Ethernet Address Recognition Location */
1046typedef enum ucc_geth_enet_address_recognition_location {
1047 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
1048 address */
1049 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
1050 station
1051 address
1052 paddr1 */
1053 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional
1054 station
1055 address
1056 paddr2 */
1057 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional
1058 station
1059 address
1060 paddr3 */
1061 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional
1062 station
1063 address
1064 paddr4 */
1065 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
1066 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
1067 hash */
1068} ucc_geth_enet_address_recognition_location_e;
1069
1070/* UCC GETH vlan operation tagged */
1071typedef enum ucc_geth_vlan_operation_tagged {
1072 UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
1073 UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
1074 = 0x1, /* Tagged - replace vid portion of q tag */
1075 UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE
1076 = 0x2, /* Tagged - if vid0 replace vid with default value */
1077 UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
1078 = 0x3 /* Tagged - extract q tag from frame */
1079} ucc_geth_vlan_operation_tagged_e;
1080
1081/* UCC GETH vlan operation non-tagged */
1082typedef enum ucc_geth_vlan_operation_non_tagged {
1083 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
1084 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
1085 q tag insert
1086 */
1087} ucc_geth_vlan_operation_non_tagged_e;
1088
1089/* UCC GETH Rx Quality of Service Mode */
1090typedef enum ucc_geth_qos_mode {
1091 UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
1092 UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
1093 determined
1094 by L2
1095 criteria */
1096 UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue
1097 determined
1098 by L3
1099 criteria */
1100} ucc_geth_qos_mode_e;
1101
1102/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
1103 for combined functionality */
1104typedef enum ucc_geth_statistics_gathering_mode {
1105 UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
1106 statistics
1107 gathering */
1108 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable
1109 hardware
1110 statistics
1111 gathering
1112 */
1113 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable
1114 firmware
1115 tx
1116 statistics
1117 gathering
1118 */
1119 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable
1120 firmware
1121 rx
1122 statistics
1123 gathering
1124 */
1125} ucc_geth_statistics_gathering_mode_e;
1126
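Because the gathering modes above are bit flags, enabling several collectors is a plain OR, e.g. hardware counters plus both firmware counter sets (ug_info here is a ucc_geth_info_t pointer, defined later in this header):

	ug_info->statisticsMode =
		UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX;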
1127/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
1128typedef enum ucc_geth_maccfg2_pad_and_crc_mode {
1129 UCC_GETH_PAD_AND_CRC_MODE_NONE
1130 = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
1131 short frames
1132 nor CRC */
1133 UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY
1134 = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append
1135 CRC only */
1136 UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
1137 MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
1138} ucc_geth_maccfg2_pad_and_crc_mode_e;
1139
1140/* UCC GETH upsmr Flow Control Mode */
1141typedef enum ucc_geth_flow_control_mode {
1142 UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
1143 flow control
1144 */
1145 UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
1146 = 0x00004000 /* Send pause frame when RxFIFO reaches its
1147 emergency threshold */
1148} ucc_geth_flow_control_mode_e;
1149
1150/* UCC GETH number of threads */
1151typedef enum ucc_geth_num_of_threads {
1152 UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
1153 UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
1154 UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
1155 UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
1156 UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
1157} ucc_geth_num_of_threads_e;
1158
1159/* UCC GETH number of station addresses */
1160typedef enum ucc_geth_num_of_station_addresses {
1161 UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
1162 UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
1163} ucc_geth_num_of_station_addresses_e;
1164
1165typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS];
1166
1167/* UCC GETH 82xx Ethernet Address Container */
1168typedef struct enet_addr_container {
1169 enet_addr_t address; /* ethernet address */
1170 ucc_geth_enet_address_recognition_location_e location; /* location in
1171 82xx address
1172 recognition
1173 hardware */
1174 struct list_head node;
1175} enet_addr_container_t;
1176
1177#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node)
1178
1179/* UCC GETH Termination Action Descriptor (TAD) structure. */
1180typedef struct ucc_geth_tad_params {
1181 int rx_non_dynamic_extended_features_mode;
1182 int reject_frame;
1183 ucc_geth_vlan_operation_tagged_e vtag_op;
1184 ucc_geth_vlan_operation_non_tagged_e vnontag_op;
1185 ucc_geth_qos_mode_e rqos;
1186 u8 vpri;
1187 u16 vid;
1188} ucc_geth_tad_params_t;
1189
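A caller building a Termination Action Descriptor fills this block with choices from the enums above; an illustrative initialization (all values are examples, not driver defaults):

	ucc_geth_tad_params_t tad = {
		.rx_non_dynamic_extended_features_mode = 0,
		.reject_frame	= 0,
		.vtag_op	= UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
		.vnontag_op	= UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
		.rqos		= UCC_GETH_QOS_MODE_DEFAULT,
		.vpri		= 0,
		.vid		= 0,
	};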
1190/* GETH protocol initialization structure */
1191typedef struct ucc_geth_info {
1192 ucc_fast_info_t uf_info;
1193 u8 numQueuesTx;
1194 u8 numQueuesRx;
1195 int ipCheckSumCheck;
1196 int ipCheckSumGenerate;
1197 int rxExtendedFiltering;
1198 u32 extendedFilteringChainPointer;
1199 u16 typeorlen;
1200 int dynamicMaxFrameLength;
1201 int dynamicMinFrameLength;
1202 u8 nonBackToBackIfgPart1;
1203 u8 nonBackToBackIfgPart2;
1204 u8 miminumInterFrameGapEnforcement;
1205 u8 backToBackInterFrameGap;
1206 int ipAddressAlignment;
1207 int lengthCheckRx;
1208 u32 mblinterval;
1209 u16 nortsrbytetime;
1210 u8 fracsiz;
1211 u8 strictpriorityq;
1212 u8 txasap;
1213 u8 extrabw;
1214 int miiPreambleSupress;
1215 u8 altBebTruncation;
1216 int altBeb;
1217 int backPressureNoBackoff;
1218 int noBackoff;
1219 int excessDefer;
1220 u8 maxRetransmission;
1221 u8 collisionWindow;
1222 int pro;
1223 int cap;
1224 int rsh;
1225 int rlpb;
1226 int cam;
1227 int bro;
1228 int ecm;
1229 int receiveFlowControl;
1230 u8 maxGroupAddrInHash;
1231 u8 maxIndAddrInHash;
1232 u8 prel;
1233 u16 maxFrameLength;
1234 u16 minFrameLength;
1235 u16 maxD1Length;
1236 u16 maxD2Length;
1237 u16 vlantype;
1238 u16 vlantci;
1239 u32 ecamptr;
1240 u32 eventRegMask;
1241 u16 pausePeriod;
1242 u16 extensionField;
1243 u8 phy_address;
1244 u32 board_flags;
1245 u32 phy_interrupt;
1246 u8 weightfactor[NUM_TX_QUEUES];
1247 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
1248 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
1249 u8 l3qt[UCC_GETH_IP_PRIORITY_MAX];
1250 u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX];
1251 u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
1252 u16 bdRingLenTx[NUM_TX_QUEUES];
1253 u16 bdRingLenRx[NUM_RX_QUEUES];
1254 enet_interface_e enet_interface;
1255 ucc_geth_num_of_station_addresses_e numStationAddresses;
1256 qe_fltr_largest_external_tbl_lookup_key_size_e
1257 largestexternallookupkeysize;
1258 ucc_geth_statistics_gathering_mode_e statisticsMode;
1259 ucc_geth_vlan_operation_tagged_e vlanOperationTagged;
1260 ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged;
1261 ucc_geth_qos_mode_e rxQoSMode;
1262 ucc_geth_flow_control_mode_e aufc;
1263 ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc;
1264 ucc_geth_num_of_threads_e numThreadsTx;
1265 ucc_geth_num_of_threads_e numThreadsRx;
1266 qe_risc_allocation_e riscTx;
1267 qe_risc_allocation_e riscRx;
1268} ucc_geth_info_t;
1269
1270/* structure representing UCC GETH */
1271typedef struct ucc_geth_private {
1272 ucc_geth_info_t *ug_info;
1273 ucc_fast_private_t *uccf;
1274 struct net_device *dev;
1275 struct net_device_stats stats; /* linux network statistics */
1276 ucc_geth_t *ug_regs;
1277 ucc_geth_init_pram_t *p_init_enet_param_shadow;
1278 ucc_geth_exf_global_pram_t *p_exf_glbl_param;
1279 u32 exf_glbl_param_offset;
1280 ucc_geth_rx_global_pram_t *p_rx_glbl_pram;
1281 u32 rx_glbl_pram_offset;
1282 ucc_geth_tx_global_pram_t *p_tx_glbl_pram;
1283 u32 tx_glbl_pram_offset;
1284 ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg;
1285 u32 send_q_mem_reg_offset;
1286 ucc_geth_thread_data_tx_t *p_thread_data_tx;
1287 u32 thread_dat_tx_offset;
1288 ucc_geth_thread_data_rx_t *p_thread_data_rx;
1289 u32 thread_dat_rx_offset;
1290 ucc_geth_scheduler_t *p_scheduler;
1291 u32 scheduler_offset;
1292 ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
1293 u32 tx_fw_statistics_pram_offset;
1294 ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
1295 u32 rx_fw_statistics_pram_offset;
1296 ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl;
1297 u32 rx_irq_coalescing_tbl_offset;
1298 ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl;
1299 u32 rx_bd_qs_tbl_offset;
1300 u8 *p_tx_bd_ring[NUM_TX_QUEUES];
1301 u32 tx_bd_ring_offset[NUM_TX_QUEUES];
1302 u8 *p_rx_bd_ring[NUM_RX_QUEUES];
1303 u32 rx_bd_ring_offset[NUM_RX_QUEUES];
1304 u8 *confBd[NUM_TX_QUEUES];
1305 u8 *txBd[NUM_TX_QUEUES];
1306 u8 *rxBd[NUM_RX_QUEUES];
1307 int badFrame[NUM_RX_QUEUES];
1308 u16 cpucount[NUM_TX_QUEUES];
1309 volatile u16 *p_cpucount[NUM_TX_QUEUES];
1310 int indAddrRegUsed[NUM_OF_PADDRS];
1311 enet_addr_t paddr[NUM_OF_PADDRS];
1312 u8 numGroupAddrInHash;
1313 u8 numIndAddrInHash;
1314 u8 numIndAddrInReg;
1315 int rx_extended_features;
1316 int rx_non_dynamic_extended_features;
1317 struct list_head conf_skbs;
1318 struct list_head group_hash_q;
1319 struct list_head ind_hash_q;
1320 u32 saved_uccm;
1321 spinlock_t lock;
1322 /* pointers to arrays of skbuffs for tx and rx */
1323 struct sk_buff **tx_skbuff[NUM_TX_QUEUES];
1324 struct sk_buff **rx_skbuff[NUM_RX_QUEUES];
1325	/* indices pointing to the next free skb in skb arrays */
1326 u16 skb_curtx[NUM_TX_QUEUES];
1327 u16 skb_currx[NUM_RX_QUEUES];
1328 /* index of the first skb which hasn't been transmitted yet. */
1329 u16 skb_dirtytx[NUM_TX_QUEUES];
1330
1331 struct work_struct tq;
1332 struct timer_list phy_info_timer;
1333 struct ugeth_mii_info *mii_info;
1334 int oldspeed;
1335 int oldduplex;
1336 int oldlink;
1337} ucc_geth_private_t;
1338
1339#endif /* __UCC_GETH_H__ */
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c
new file mode 100644
index 000000000000..f91028c5386d
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.c
@@ -0,0 +1,801 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * UCC GETH Driver -- PHY handling
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/version.h>
36#include <linux/crc32.h>
37#include <linux/mii.h>
38#include <linux/ethtool.h>
39
40#include <asm/io.h>
41#include <asm/irq.h>
42#include <asm/uaccess.h>
43
44#include "ucc_geth.h"
45#include "ucc_geth_phy.h"
46#include <platforms/83xx/mpc8360e_pb.h>
47
48#define ugphy_printk(level, format, arg...) \
49 printk(level format "\n", ## arg)
50
51#define ugphy_dbg(format, arg...) \
52 ugphy_printk(KERN_DEBUG, format , ## arg)
53#define ugphy_err(format, arg...) \
54 ugphy_printk(KERN_ERR, format , ## arg)
55#define ugphy_info(format, arg...) \
56 ugphy_printk(KERN_INFO, format , ## arg)
57#define ugphy_warn(format, arg...) \
58 ugphy_printk(KERN_WARNING, format , ## arg)
59
60#ifdef UGETH_VERBOSE_DEBUG
61#define ugphy_vdbg ugphy_dbg
62#else
63#define ugphy_vdbg(fmt, args...) do { } while (0)
64#endif /* UGETH_VERBOSE_DEBUG */
65
66static void config_genmii_advert(struct ugeth_mii_info *mii_info);
67static void genmii_setup_forced(struct ugeth_mii_info *mii_info);
68static void genmii_restart_aneg(struct ugeth_mii_info *mii_info);
69static int gbit_config_aneg(struct ugeth_mii_info *mii_info);
70static int genmii_config_aneg(struct ugeth_mii_info *mii_info);
71static int genmii_update_link(struct ugeth_mii_info *mii_info);
72static int genmii_read_status(struct ugeth_mii_info *mii_info);
73u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum);
74void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val);
75
76static u8 *bcsr_regs = NULL;
77
78/* Write value to the PHY for this device to the register at regnum, */
79/* waiting until the write is done before it returns. All PHY */
80/* configuration has to be done through the TSEC1 MIIM regs */
81void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
82{
83 ucc_geth_private_t *ugeth = netdev_priv(dev);
84 ucc_mii_mng_t *mii_regs;
85 enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
86 u32 tmp_reg;
87
88 ugphy_vdbg("%s: IN", __FUNCTION__);
89
90 spin_lock_irq(&ugeth->lock);
91
92 mii_regs = ugeth->mii_info->mii_regs;
93
94	/* Set this UCC to be the master of the MII management */
95 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
96
97 /* Stop the MII management read cycle */
98 out_be32(&mii_regs->miimcom, 0);
99	/* Setting up the MII Management Address Register */
100 tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
101 out_be32(&mii_regs->miimadd, tmp_reg);
102
103	/* Setting up the MII Management Control Register with the value */
104 out_be32(&mii_regs->miimcon, (u32) value);
105
106 /* Wait till MII management write is complete */
107 while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
108 cpu_relax();
109
110 spin_unlock_irq(&ugeth->lock);
111
112 udelay(10000);
113}
114
115/* Reads from register regnum in the PHY for device dev, */
116/* returning the value. Clears miimcom first. All PHY */
117/* configuration has to be done through the TSEC1 MIIM regs */
118int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
119{
120 ucc_geth_private_t *ugeth = netdev_priv(dev);
121 ucc_mii_mng_t *mii_regs;
122 enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
123 u32 tmp_reg;
124 u16 value;
125
126 ugphy_vdbg("%s: IN", __FUNCTION__);
127
128 spin_lock_irq(&ugeth->lock);
129
130 mii_regs = ugeth->mii_info->mii_regs;
131
132	/* Setting up the MII Management Address Register */
133 tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
134 out_be32(&mii_regs->miimadd, tmp_reg);
135
136 /* Perform an MII management read cycle */
137 out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE);
138
139	/* Wait till MII management read is complete */
140 while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
141 cpu_relax();
142
143 udelay(10000);
144
145 /* Read MII management status */
146 value = (u16) in_be32(&mii_regs->miimstat);
147 out_be32(&mii_regs->miimcom, 0);
148 if (value == 0xffff)
149		ugphy_warn("read wrong value: mii_id %d, mii_reg %d, base %08x",
150 mii_id, mii_reg, (u32) & (mii_regs->miimcfg));
151
152 spin_unlock_irq(&ugeth->lock);
153
154 return (value);
155}
156
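A short sketch of the calling convention of the two accessors above: soft-reset the PHY and poll BMSR for link, with dev and mii_id assumed valid (a real caller would bound the loop with a timeout):

	int bmcr = read_phy_reg(dev, mii_id, MII_BMCR);

	write_phy_reg(dev, mii_id, MII_BMCR, bmcr | BMCR_RESET);

	while (!(read_phy_reg(dev, mii_id, MII_BMSR) & BMSR_LSTATUS))
		cpu_relax();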
157void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info)
158{
159 ugphy_vdbg("%s: IN", __FUNCTION__);
160
161 if (mii_info->phyinfo->ack_interrupt)
162 mii_info->phyinfo->ack_interrupt(mii_info);
163}
164
165void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
166 u32 interrupts)
167{
168 ugphy_vdbg("%s: IN", __FUNCTION__);
169
170 mii_info->interrupts = interrupts;
171 if (mii_info->phyinfo->config_intr)
172 mii_info->phyinfo->config_intr(mii_info);
173}
174
175/* Writes MII_ADVERTISE with the appropriate values, after
176 * sanitizing advertise to make sure only supported features
177 * are advertised
178 */
179static void config_genmii_advert(struct ugeth_mii_info *mii_info)
180{
181 u32 advertise;
182 u16 adv;
183
184 ugphy_vdbg("%s: IN", __FUNCTION__);
185
186 /* Only allow advertising what this PHY supports */
187 mii_info->advertising &= mii_info->phyinfo->features;
188 advertise = mii_info->advertising;
189
190 /* Setup standard advertisement */
191 adv = phy_read(mii_info, MII_ADVERTISE);
192 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
193 if (advertise & ADVERTISED_10baseT_Half)
194 adv |= ADVERTISE_10HALF;
195 if (advertise & ADVERTISED_10baseT_Full)
196 adv |= ADVERTISE_10FULL;
197 if (advertise & ADVERTISED_100baseT_Half)
198 adv |= ADVERTISE_100HALF;
199 if (advertise & ADVERTISED_100baseT_Full)
200 adv |= ADVERTISE_100FULL;
201 phy_write(mii_info, MII_ADVERTISE, adv);
202}
203
204static void genmii_setup_forced(struct ugeth_mii_info *mii_info)
205{
206 u16 ctrl;
207 u32 features = mii_info->phyinfo->features;
208
209 ugphy_vdbg("%s: IN", __FUNCTION__);
210
211 ctrl = phy_read(mii_info, MII_BMCR);
212
213 ctrl &=
214 ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
215 ctrl |= BMCR_RESET;
216
217 switch (mii_info->speed) {
218 case SPEED_1000:
219 if (features & (SUPPORTED_1000baseT_Half
220 | SUPPORTED_1000baseT_Full)) {
221 ctrl |= BMCR_SPEED1000;
222 break;
223 }
224		mii_info->speed = SPEED_100;	/* fall through */
225 case SPEED_100:
226 if (features & (SUPPORTED_100baseT_Half
227 | SUPPORTED_100baseT_Full)) {
228 ctrl |= BMCR_SPEED100;
229 break;
230 }
231		mii_info->speed = SPEED_10;	/* fall through */
232 case SPEED_10:
233 if (features & (SUPPORTED_10baseT_Half
234 | SUPPORTED_10baseT_Full))
235 break;
236 default: /* Unsupported speed! */
237 ugphy_err("%s: Bad speed!", mii_info->dev->name);
238 break;
239 }
240
241 phy_write(mii_info, MII_BMCR, ctrl);
242}
243
244/* Enable and Restart Autonegotiation */
245static void genmii_restart_aneg(struct ugeth_mii_info *mii_info)
246{
247 u16 ctl;
248
249 ugphy_vdbg("%s: IN", __FUNCTION__);
250
251 ctl = phy_read(mii_info, MII_BMCR);
252 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
253 phy_write(mii_info, MII_BMCR, ctl);
254}
255
256static int gbit_config_aneg(struct ugeth_mii_info *mii_info)
257{
258 u16 adv;
259 u32 advertise;
260
261 ugphy_vdbg("%s: IN", __FUNCTION__);
262
263 if (mii_info->autoneg) {
264 /* Configure the ADVERTISE register */
265 config_genmii_advert(mii_info);
266 advertise = mii_info->advertising;
267
268 adv = phy_read(mii_info, MII_1000BASETCONTROL);
269 adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
270 MII_1000BASETCONTROL_HALFDUPLEXCAP);
271 if (advertise & SUPPORTED_1000baseT_Half)
272 adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
273 if (advertise & SUPPORTED_1000baseT_Full)
274 adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
275 phy_write(mii_info, MII_1000BASETCONTROL, adv);
276
277 /* Start/Restart aneg */
278 genmii_restart_aneg(mii_info);
279 } else
280 genmii_setup_forced(mii_info);
281
282 return 0;
283}
284
285static int genmii_config_aneg(struct ugeth_mii_info *mii_info)
286{
287 ugphy_vdbg("%s: IN", __FUNCTION__);
288
289 if (mii_info->autoneg) {
290 config_genmii_advert(mii_info);
291 genmii_restart_aneg(mii_info);
292 } else
293 genmii_setup_forced(mii_info);
294
295 return 0;
296}
297
298static int genmii_update_link(struct ugeth_mii_info *mii_info)
299{
300 u16 status;
301
302 ugphy_vdbg("%s: IN", __FUNCTION__);
303
304 /* Do a fake read */
305 phy_read(mii_info, MII_BMSR);
306
307 /* Read link and autonegotiation status */
308 status = phy_read(mii_info, MII_BMSR);
309 if ((status & BMSR_LSTATUS) == 0)
310 mii_info->link = 0;
311 else
312 mii_info->link = 1;
313
314 /* If we are autonegotiating, and not done,
315 * return an error */
316 if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
317 return -EAGAIN;
318
319 return 0;
320}
321
322static int genmii_read_status(struct ugeth_mii_info *mii_info)
323{
324 u16 status;
325 int err;
326
327 ugphy_vdbg("%s: IN", __FUNCTION__);
328
329 /* Update the link, but return if there
330 * was an error */
331 err = genmii_update_link(mii_info);
332 if (err)
333 return err;
334
335 if (mii_info->autoneg) {
336 status = phy_read(mii_info, MII_LPA);
337
338 if (status & (LPA_10FULL | LPA_100FULL))
339 mii_info->duplex = DUPLEX_FULL;
340 else
341 mii_info->duplex = DUPLEX_HALF;
342 if (status & (LPA_100FULL | LPA_100HALF))
343 mii_info->speed = SPEED_100;
344 else
345 mii_info->speed = SPEED_10;
346 mii_info->pause = 0;
347 }
348 /* On non-aneg, we assume what we put in BMCR is the speed,
349 * though magic-aneg shouldn't prevent this case from occurring
350 */
351
352 return 0;
353}
354
355static int marvell_init(struct ugeth_mii_info *mii_info)
356{
357 ugphy_vdbg("%s: IN", __FUNCTION__);
358
359 phy_write(mii_info, 0x14, 0x0cd2);
360 phy_write(mii_info, MII_BMCR,
361 phy_read(mii_info, MII_BMCR) | BMCR_RESET);
362 msleep(4000);
363
364 return 0;
365}
366
367static int marvell_config_aneg(struct ugeth_mii_info *mii_info)
368{
369 ugphy_vdbg("%s: IN", __FUNCTION__);
370
371	/* The Marvell PHY has an erratum which requires
372 * that certain registers get written in order
373 * to restart autonegotiation */
374 phy_write(mii_info, MII_BMCR, BMCR_RESET);
375
376 phy_write(mii_info, 0x1d, 0x1f);
377 phy_write(mii_info, 0x1e, 0x200c);
378 phy_write(mii_info, 0x1d, 0x5);
379 phy_write(mii_info, 0x1e, 0);
380 phy_write(mii_info, 0x1e, 0x100);
381
382 gbit_config_aneg(mii_info);
383
384 return 0;
385}
386
387static int marvell_read_status(struct ugeth_mii_info *mii_info)
388{
389 u16 status;
390 int err;
391
392 ugphy_vdbg("%s: IN", __FUNCTION__);
393
394 /* Update the link, but return if there
395 * was an error */
396 err = genmii_update_link(mii_info);
397 if (err)
398 return err;
399
400 /* If the link is up, read the speed and duplex */
401 /* If we aren't autonegotiating, assume speeds
402 * are as set */
403 if (mii_info->autoneg && mii_info->link) {
404 int speed;
405 status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
406
407		/* Get the duplex */
408 if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
409 mii_info->duplex = DUPLEX_FULL;
410 else
411 mii_info->duplex = DUPLEX_HALF;
412
413 /* Get the speed */
414 speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
415 switch (speed) {
416 case MII_M1011_PHY_SPEC_STATUS_1000:
417 mii_info->speed = SPEED_1000;
418 break;
419 case MII_M1011_PHY_SPEC_STATUS_100:
420 mii_info->speed = SPEED_100;
421 break;
422 default:
423 mii_info->speed = SPEED_10;
424 break;
425 }
426 mii_info->pause = 0;
427 }
428
429 return 0;
430}
431
432static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info)
433{
434 ugphy_vdbg("%s: IN", __FUNCTION__);
435
436 /* Clear the interrupts by reading the reg */
437 phy_read(mii_info, MII_M1011_IEVENT);
438
439 return 0;
440}
441
442static int marvell_config_intr(struct ugeth_mii_info *mii_info)
443{
444 ugphy_vdbg("%s: IN", __FUNCTION__);
445
446 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
447 phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
448 else
449 phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
450
451 return 0;
452}
453
454static int cis820x_init(struct ugeth_mii_info *mii_info)
455{
456 ugphy_vdbg("%s: IN", __FUNCTION__);
457
458 phy_write(mii_info, MII_CIS8201_AUX_CONSTAT,
459 MII_CIS8201_AUXCONSTAT_INIT);
460 phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT);
461
462 return 0;
463}
464
465static int cis820x_read_status(struct ugeth_mii_info *mii_info)
466{
467 u16 status;
468 int err;
469
470 ugphy_vdbg("%s: IN", __FUNCTION__);
471
472 /* Update the link, but return if there
473 * was an error */
474 err = genmii_update_link(mii_info);
475 if (err)
476 return err;
477
478 /* If the link is up, read the speed and duplex */
479 /* If we aren't autonegotiating, assume speeds
480 * are as set */
481 if (mii_info->autoneg && mii_info->link) {
482 int speed;
483
484 status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
485 if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
486 mii_info->duplex = DUPLEX_FULL;
487 else
488 mii_info->duplex = DUPLEX_HALF;
489
490 speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
491
492 switch (speed) {
493 case MII_CIS8201_AUXCONSTAT_GBIT:
494 mii_info->speed = SPEED_1000;
495 break;
496 case MII_CIS8201_AUXCONSTAT_100:
497 mii_info->speed = SPEED_100;
498 break;
499 default:
500 mii_info->speed = SPEED_10;
501 break;
502 }
503 }
504
505 return 0;
506}
507
508static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info)
509{
510 ugphy_vdbg("%s: IN", __FUNCTION__);
511
512 phy_read(mii_info, MII_CIS8201_ISTAT);
513
514 return 0;
515}
516
517static int cis820x_config_intr(struct ugeth_mii_info *mii_info)
518{
519 ugphy_vdbg("%s: IN", __FUNCTION__);
520
521 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
522 phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
523 else
524 phy_write(mii_info, MII_CIS8201_IMASK, 0);
525
526 return 0;
527}
528
529#define DM9161_DELAY 10
530
531static int dm9161_read_status(struct ugeth_mii_info *mii_info)
532{
533 u16 status;
534 int err;
535
536 ugphy_vdbg("%s: IN", __FUNCTION__);
537
538 /* Update the link, but return if there
539 * was an error */
540 err = genmii_update_link(mii_info);
541 if (err)
542 return err;
543
544 /* If the link is up, read the speed and duplex */
545 /* If we aren't autonegotiating, assume speeds
546 * are as set */
547 if (mii_info->autoneg && mii_info->link) {
548 status = phy_read(mii_info, MII_DM9161_SCSR);
549 if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
550 mii_info->speed = SPEED_100;
551 else
552 mii_info->speed = SPEED_10;
553
554 if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
555 mii_info->duplex = DUPLEX_FULL;
556 else
557 mii_info->duplex = DUPLEX_HALF;
558 }
559
560 return 0;
561}
562
563static int dm9161_config_aneg(struct ugeth_mii_info *mii_info)
564{
565 struct dm9161_private *priv = mii_info->priv;
566
567 ugphy_vdbg("%s: IN", __FUNCTION__);
568
569 if (0 == priv->resetdone)
570 return -EAGAIN;
571
572 return 0;
573}
574
575static void dm9161_timer(unsigned long data)
576{
577 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
578 struct dm9161_private *priv = mii_info->priv;
579 u16 status = phy_read(mii_info, MII_BMSR);
580
581 ugphy_vdbg("%s: IN", __FUNCTION__);
582
583 if (status & BMSR_ANEGCOMPLETE) {
584 priv->resetdone = 1;
585 } else
586 mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
587}
588
589static int dm9161_init(struct ugeth_mii_info *mii_info)
590{
591 struct dm9161_private *priv;
592
593 ugphy_vdbg("%s: IN", __FUNCTION__);
594
595 /* Allocate the private data structure */
596 priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
597
598 if (NULL == priv)
599 return -ENOMEM;
600
601 mii_info->priv = priv;
602
603 /* Reset is not done yet */
604 priv->resetdone = 0;
605
606 phy_write(mii_info, MII_BMCR,
607 phy_read(mii_info, MII_BMCR) | BMCR_RESET);
608
609 phy_write(mii_info, MII_BMCR,
610 phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE);
611
612 config_genmii_advert(mii_info);
613 /* Start/Restart aneg */
614 genmii_config_aneg(mii_info);
615
616 /* Start a timer for DM9161_DELAY seconds to wait
617 * for the PHY to be ready */
618 init_timer(&priv->timer);
619 priv->timer.function = &dm9161_timer;
620 priv->timer.data = (unsigned long)mii_info;
621 mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
622
623 return 0;
624}
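For illustration only (not part of this patch): dm9161_config_aneg() above keeps returning -EAGAIN until the reset timer started here fires, so a caller has to poll. A hedged sketch of such a caller follows; the example_wait_aneg() name and the one-second retry interval are assumptions, not code from this driver.

	/* Hypothetical caller: poll config_aneg until the DM9161 reset
	 * timer has fired and marked resetdone. */
	static int example_wait_aneg(struct ugeth_mii_info *mii_info)
	{
		int err;

		while ((err = mii_info->phyinfo->config_aneg(mii_info)) == -EAGAIN)
			msleep(1000);	/* PHY still coming out of reset */

		return err;
	}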
625
626static void dm9161_close(struct ugeth_mii_info *mii_info)
627{
628 struct dm9161_private *priv = mii_info->priv;
629
630 ugphy_vdbg("%s: IN", __FUNCTION__);
631
632 del_timer_sync(&priv->timer);
633 kfree(priv);
634}
635
636static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info)
637{
638/* FIXME: These lines work around a bug in the mpc8325.
639Remove them from here when it's fixed */
640 if (bcsr_regs == NULL)
641 bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
642 bcsr_regs[14] |= 0x40;
643 ugphy_vdbg("%s: IN", __FUNCTION__);
644
645 /* Clear the interrupts by reading the reg */
646 phy_read(mii_info, MII_DM9161_INTR);
647
648
649 return 0;
650}
651
652static int dm9161_config_intr(struct ugeth_mii_info *mii_info)
653{
654/* FIXME: These lines work around a bug in the mpc8325.
655Remove them from here when it's fixed */
656 if (bcsr_regs == NULL) {
657 bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
658 bcsr_regs[14] &= ~0x40;
659 }
660 ugphy_vdbg("%s: IN", __FUNCTION__);
661
662 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
663 phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT);
664 else
665 phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP);
666
667 return 0;
668}
669
670/* Cicada 820x */
671static struct phy_info phy_info_cis820x = {
672 .phy_id = 0x000fc440,
673 .name = "Cicada Cis8204",
674 .phy_id_mask = 0x000fffc0,
675 .features = MII_GBIT_FEATURES,
676 .init = &cis820x_init,
677 .config_aneg = &gbit_config_aneg,
678 .read_status = &cis820x_read_status,
679 .ack_interrupt = &cis820x_ack_interrupt,
680 .config_intr = &cis820x_config_intr,
681};
682
683static struct phy_info phy_info_dm9161 = {
684 .phy_id = 0x0181b880,
685 .phy_id_mask = 0x0ffffff0,
686 .name = "Davicom DM9161E",
687 .init = dm9161_init,
688 .config_aneg = dm9161_config_aneg,
689 .read_status = dm9161_read_status,
690 .close = dm9161_close,
691};
692
693static struct phy_info phy_info_dm9161a = {
694 .phy_id = 0x0181b8a0,
695 .phy_id_mask = 0x0ffffff0,
696 .name = "Davicom DM9161A",
697 .features = MII_BASIC_FEATURES,
698 .init = dm9161_init,
699 .config_aneg = dm9161_config_aneg,
700 .read_status = dm9161_read_status,
701 .ack_interrupt = dm9161_ack_interrupt,
702 .config_intr = dm9161_config_intr,
703 .close = dm9161_close,
704};
705
706static struct phy_info phy_info_marvell = {
707 .phy_id = 0x01410c00,
708 .phy_id_mask = 0xffffff00,
709 .name = "Marvell 88E11x1",
710 .features = MII_GBIT_FEATURES,
711 .init = &marvell_init,
712 .config_aneg = &marvell_config_aneg,
713 .read_status = &marvell_read_status,
714 .ack_interrupt = &marvell_ack_interrupt,
715 .config_intr = &marvell_config_intr,
716};
717
718static struct phy_info phy_info_genmii = {
719 .phy_id = 0x00000000,
720 .phy_id_mask = 0x00000000,
721 .name = "Generic MII",
722 .features = MII_BASIC_FEATURES,
723 .config_aneg = genmii_config_aneg,
724 .read_status = genmii_read_status,
725};
726
727static struct phy_info *phy_info[] = {
728 &phy_info_cis820x,
729 &phy_info_marvell,
730 &phy_info_dm9161,
731 &phy_info_dm9161a,
732 &phy_info_genmii,
733 NULL
734};
735
736u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum)
737{
738 u16 retval;
739 unsigned long flags;
740
741 ugphy_vdbg("%s: IN", __FUNCTION__);
742
743 spin_lock_irqsave(&mii_info->mdio_lock, flags);
744 retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
745 spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
746
747 return retval;
748}
749
750void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val)
751{
752 unsigned long flags;
753
754 ugphy_vdbg("%s: IN", __FUNCTION__);
755
756 spin_lock_irqsave(&mii_info->mdio_lock, flags);
757 mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val);
758 spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
759}
760
761/* Use the PHY ID registers to determine what type of PHY is attached
762 * to device dev. Return a struct phy_info structure describing that PHY.
763 */
764struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info)
765{
766 u16 phy_reg;
767 u32 phy_ID;
768 int i;
769 struct phy_info *theInfo = NULL;
770 struct net_device *dev = mii_info->dev;
771
772 ugphy_vdbg("%s: IN", __FUNCTION__);
773
774 /* Grab the bits from PHYIR1, and put them in the upper half */
775 phy_reg = phy_read(mii_info, MII_PHYSID1);
776 phy_ID = (phy_reg & 0xffff) << 16;
777
778 /* Grab the bits from PHYIR2, and put them in the lower half */
779 phy_reg = phy_read(mii_info, MII_PHYSID2);
780 phy_ID |= (phy_reg & 0xffff);
781
782 /* loop through all the known PHY types, and find one that */
783 /* matches the ID we read from the PHY. */
784 for (i = 0; phy_info[i]; i++)
785 if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){
786 theInfo = phy_info[i];
787 break;
788 }
789
790 /* This shouldn't happen, as we have generic PHY support */
791 if (theInfo == NULL) {
792 ugphy_info("%s: PHY id %x is not supported!", dev->name,
793 phy_ID);
794 return NULL;
795 } else {
796 ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name,
797 phy_ID);
798 }
799
800 return theInfo;
801}
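For illustration only (not from this patch): a sketch of how a MAC driver might bind a PHY with the helper above. The example_attach_phy() name is hypothetical; the phyinfo field and init hook are the ones declared in ucc_geth_phy.h below.

	static int example_attach_phy(struct ugeth_mii_info *mii_info)
	{
		struct phy_info *info = get_phy_info(mii_info);

		if (info == NULL)
			return -ENODEV;

		mii_info->phyinfo = info;

		/* Run the optional per-PHY setup hook, if the table has one. */
		if (info->init)
			return info->init(mii_info);

		return 0;
	}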
diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h
new file mode 100644
index 000000000000..2f98b8f1bb0a
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.h
@@ -0,0 +1,217 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * UCC GETH Driver -- PHY handling
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#ifndef __UCC_GETH_PHY_H__
20#define __UCC_GETH_PHY_H__
21
22#define MII_end ((u32)-2)
23#define MII_read ((u32)-1)
24
25#define MIIMIND_BUSY 0x00000001
26#define MIIMIND_NOTVALID 0x00000004
27
28#define UGETH_AN_TIMEOUT 2000
29
30/* 1000BT control (Marvell & BCM54xx at least) */
31#define MII_1000BASETCONTROL 0x09
32#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
33#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
34
35/* Cicada Extended Control Register 1 */
36#define MII_CIS8201_EXT_CON1 0x17
37#define MII_CIS8201_EXTCON1_INIT 0x0000
38
39/* Cicada Interrupt Mask Register */
40#define MII_CIS8201_IMASK 0x19
41#define MII_CIS8201_IMASK_IEN 0x8000
42#define MII_CIS8201_IMASK_SPEED 0x4000
43#define MII_CIS8201_IMASK_LINK 0x2000
44#define MII_CIS8201_IMASK_DUPLEX 0x1000
45#define MII_CIS8201_IMASK_MASK 0xf000
46
47/* Cicada Interrupt Status Register */
48#define MII_CIS8201_ISTAT 0x1a
49#define MII_CIS8201_ISTAT_STATUS 0x8000
50#define MII_CIS8201_ISTAT_SPEED 0x4000
51#define MII_CIS8201_ISTAT_LINK 0x2000
52#define MII_CIS8201_ISTAT_DUPLEX 0x1000
53
54/* Cicada Auxiliary Control/Status Register */
55#define MII_CIS8201_AUX_CONSTAT 0x1c
56#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
57#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
58#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
59#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
60#define MII_CIS8201_AUXCONSTAT_100 0x0008
61
62/* 88E1011 PHY Status Register */
63#define MII_M1011_PHY_SPEC_STATUS 0x11
64#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
65#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
66#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
67#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
68#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
69#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
70
71#define MII_M1011_IEVENT 0x13
72#define MII_M1011_IEVENT_CLEAR 0x0000
73
74#define MII_M1011_IMASK 0x12
75#define MII_M1011_IMASK_INIT 0x6400
76#define MII_M1011_IMASK_CLEAR 0x0000
77
78#define MII_DM9161_SCR 0x10
79#define MII_DM9161_SCR_INIT 0x0610
80
81/* DM9161 Specified Configuration and Status Register */
82#define MII_DM9161_SCSR 0x11
83#define MII_DM9161_SCSR_100F 0x8000
84#define MII_DM9161_SCSR_100H 0x4000
85#define MII_DM9161_SCSR_10F 0x2000
86#define MII_DM9161_SCSR_10H 0x1000
87
88/* DM9161 Interrupt Register */
89#define MII_DM9161_INTR 0x15
90#define MII_DM9161_INTR_PEND 0x8000
91#define MII_DM9161_INTR_DPLX_MASK 0x0800
92#define MII_DM9161_INTR_SPD_MASK 0x0400
93#define MII_DM9161_INTR_LINK_MASK 0x0200
94#define MII_DM9161_INTR_MASK 0x0100
95#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
96#define MII_DM9161_INTR_SPD_CHANGE 0x0008
97#define MII_DM9161_INTR_LINK_CHANGE 0x0004
98#define MII_DM9161_INTR_INIT 0x0000
99#define MII_DM9161_INTR_STOP \
100(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
101 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
102
103/* DM9161 10BT Configuration/Status */
104#define MII_DM9161_10BTCSR 0x12
105#define MII_DM9161_10BTCSR_INIT 0x7800
106
107#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
108 SUPPORTED_10baseT_Full | \
109 SUPPORTED_100baseT_Half | \
110 SUPPORTED_100baseT_Full | \
111 SUPPORTED_Autoneg | \
112 SUPPORTED_TP | \
113 SUPPORTED_MII)
114
115#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
116 SUPPORTED_1000baseT_Half | \
117 SUPPORTED_1000baseT_Full)
118
119#define MII_READ_COMMAND 0x00000001
120
121#define MII_INTERRUPT_DISABLED 0x0
122#define MII_INTERRUPT_ENABLED 0x1
123/* Taken from mii_if_info and sungem_phy.h */
124struct ugeth_mii_info {
125 /* Information about the PHY type */
126 /* And management functions */
127 struct phy_info *phyinfo;
128
129 ucc_mii_mng_t *mii_regs;
130
131 /* forced speed & duplex (no autoneg)
132 * partner speed & duplex & pause (autoneg)
133 */
134 int speed;
135 int duplex;
136 int pause;
137
138 /* The most recently read link state */
139 int link;
140
141 /* Enabled Interrupts */
142 u32 interrupts;
143
144 u32 advertising;
145 int autoneg;
146 int mii_id;
147
148 /* private data pointer */
149 /* For use by PHYs to maintain extra state */
150 void *priv;
151
152 /* Provided by host chip */
153 struct net_device *dev;
154
155 /* A lock to ensure that only one thing can read/write
156 * the MDIO bus at a time */
157 spinlock_t mdio_lock;
158
159 /* Provided by ethernet driver */
160 int (*mdio_read) (struct net_device * dev, int mii_id, int reg);
161 void (*mdio_write) (struct net_device * dev, int mii_id, int reg,
162 int val);
163};
164
165/* struct phy_info: a structure which defines attributes for a PHY
166 *
167 * id will contain a number which represents the PHY. During
168 * startup, the driver will poll the PHY to find out what its
169 * UID--as defined by registers 2 and 3--is. The 32-bit result
170 * read from the PHY will be ANDed with phy_id_mask to
171 * discard any bits which may change with revision and are
172 * unimportant to functionality.
173 *
174 * There are 7 commands which take a ugeth_mii_info structure.
175 * Each PHY must declare config_aneg and read_status.
176 */
177struct phy_info {
178 u32 phy_id;
179 char *name;
180 unsigned int phy_id_mask;
181 u32 features;
182
183 /* Called to initialize the PHY */
184 int (*init) (struct ugeth_mii_info * mii_info);
185
186 /* Called to suspend the PHY for power */
187 int (*suspend) (struct ugeth_mii_info * mii_info);
188
189 /* Reconfigures autonegotiation (or disables it) */
190 int (*config_aneg) (struct ugeth_mii_info * mii_info);
191
192 /* Determines the negotiated speed and duplex */
193 int (*read_status) (struct ugeth_mii_info * mii_info);
194
195 /* Clears any pending interrupts */
196 int (*ack_interrupt) (struct ugeth_mii_info * mii_info);
197
198 /* Enables or disables interrupts */
199 int (*config_intr) (struct ugeth_mii_info * mii_info);
200
201 /* Clears up any memory if needed */
202 void (*close) (struct ugeth_mii_info * mii_info);
203};
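A worked example of the mask-and-compare that get_phy_info() performs on this table; the UID value is hypothetical, chosen to carry a nonzero revision nibble.

	/* Suppose MII_PHYSID1/2 yield the 32-bit UID 0x01410c02
	 * (revision nibble 0x2). Masking with phy_info_marvell's
	 * .phy_id_mask (0xffffff00) gives 0x01410c00, which equals
	 * its .phy_id, so the entry matches regardless of revision. */
	u32 uid = 0x01410c02;
	int match = ((uid & 0xffffff00) == 0x01410c00);	/* -> 1 */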
204
205struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info);
206void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
207int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
208void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info);
209void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
210 u32 interrupts);
211
212struct dm9161_private {
213 struct timer_list timer;
214 int resetdone;
215};
216
217#endif /* __UCC_GETH_PHY_H__ */
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index d3d0ec970318..f7bc44f2d96a 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
30*/ 30*/
31 31
32#define DRV_NAME "via-rhine" 32#define DRV_NAME "via-rhine"
33#define DRV_VERSION "1.4.0" 33#define DRV_VERSION "1.4.2"
34#define DRV_RELDATE "June-27-2006" 34#define DRV_RELDATE "Sept-11-2006"
35 35
36 36
37/* A few user-configurable values. 37/* A few user-configurable values.
@@ -44,6 +44,10 @@ static int max_interrupt_work = 20;
44 Setting to > 1518 effectively disables this feature. */ 44 Setting to > 1518 effectively disables this feature. */
45static int rx_copybreak; 45static int rx_copybreak;
46 46
47/* Work-around for broken BIOSes: they are unable to get the chip back out of
48 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
49static int avoid_D3;
50
47/* 51/*
48 * In case you are looking for 'options[]' or 'full_duplex[]', they 52 * In case you are looking for 'options[]' or 'full_duplex[]', they
49 * are gone. Use ethtool(8) instead. 53 * are gone. Use ethtool(8) instead.
@@ -63,7 +67,11 @@ static const int multicast_filter_limit = 32;
63 There are no ill effects from too-large receive rings. */ 67 There are no ill effects from too-large receive rings. */
64#define TX_RING_SIZE 16 68#define TX_RING_SIZE 16
65#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ 69#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
70#ifdef CONFIG_VIA_RHINE_NAPI
71#define RX_RING_SIZE 64
72#else
66#define RX_RING_SIZE 16 73#define RX_RING_SIZE 16
74#endif
67 75
68 76
69/* Operational parameters that usually are not changed. */ 77/* Operational parameters that usually are not changed. */
@@ -116,9 +124,11 @@ MODULE_LICENSE("GPL");
116module_param(max_interrupt_work, int, 0); 124module_param(max_interrupt_work, int, 0);
117module_param(debug, int, 0); 125module_param(debug, int, 0);
118module_param(rx_copybreak, int, 0); 126module_param(rx_copybreak, int, 0);
127module_param(avoid_D3, bool, 0);
119MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); 128MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
120MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); 129MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
121MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); 130MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
131MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
122 132
123/* 133/*
124 Theory of Operation 134 Theory of Operation
@@ -396,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev);
396static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); 406static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
397static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 407static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
398static void rhine_tx(struct net_device *dev); 408static void rhine_tx(struct net_device *dev);
399static void rhine_rx(struct net_device *dev); 409static int rhine_rx(struct net_device *dev, int limit);
400static void rhine_error(struct net_device *dev, int intr_status); 410static void rhine_error(struct net_device *dev, int intr_status);
401static void rhine_set_rx_mode(struct net_device *dev); 411static void rhine_set_rx_mode(struct net_device *dev);
402static struct net_device_stats *rhine_get_stats(struct net_device *dev); 412static struct net_device_stats *rhine_get_stats(struct net_device *dev);
@@ -564,6 +574,32 @@ static void rhine_poll(struct net_device *dev)
564} 574}
565#endif 575#endif
566 576
577#ifdef CONFIG_VIA_RHINE_NAPI
578static int rhine_napipoll(struct net_device *dev, int *budget)
579{
580 struct rhine_private *rp = netdev_priv(dev);
581 void __iomem *ioaddr = rp->base;
582 int done, limit = min(dev->quota, *budget);
583
584 done = rhine_rx(dev, limit);
585 *budget -= done;
586 dev->quota -= done;
587
588 if (done < limit) {
589 netif_rx_complete(dev);
590
591 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
592 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
593 IntrTxDone | IntrTxError | IntrTxUnderrun |
594 IntrPCIErr | IntrStatsMax | IntrLinkChange,
595 ioaddr + IntrEnable);
596 return 0;
597 }
598 else
599 return 1;
600}
601#endif
602
567static void rhine_hw_init(struct net_device *dev, long pioaddr) 603static void rhine_hw_init(struct net_device *dev, long pioaddr)
568{ 604{
569 struct rhine_private *rp = netdev_priv(dev); 605 struct rhine_private *rp = netdev_priv(dev);
@@ -744,6 +780,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
744#ifdef CONFIG_NET_POLL_CONTROLLER 780#ifdef CONFIG_NET_POLL_CONTROLLER
745 dev->poll_controller = rhine_poll; 781 dev->poll_controller = rhine_poll;
746#endif 782#endif
783#ifdef CONFIG_VIA_RHINE_NAPI
784 dev->poll = rhine_napipoll;
785 dev->weight = 64;
786#endif
747 if (rp->quirks & rqRhineI) 787 if (rp->quirks & rqRhineI)
748 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 788 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
749 789
@@ -789,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
789 } 829 }
790 } 830 }
791 rp->mii_if.phy_id = phy_id; 831 rp->mii_if.phy_id = phy_id;
832 if (debug > 1 && avoid_D3)
833 printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
834 dev->name);
792 835
793 return 0; 836 return 0;
794 837
@@ -1014,6 +1057,8 @@ static void init_registers(struct net_device *dev)
1014 1057
1015 rhine_set_rx_mode(dev); 1058 rhine_set_rx_mode(dev);
1016 1059
1060 netif_poll_enable(dev);
1061
1017 /* Enable interrupts by setting the interrupt mask. */ 1062 /* Enable interrupts by setting the interrupt mask. */
1018 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 1063 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1019 IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 1064 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1268,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
1268 dev->name, intr_status); 1313 dev->name, intr_status);
1269 1314
1270 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | 1315 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1271 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) 1316 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1272 rhine_rx(dev); 1317#ifdef CONFIG_VIA_RHINE_NAPI
1318 iowrite16(IntrTxAborted |
1319 IntrTxDone | IntrTxError | IntrTxUnderrun |
1320 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1321 ioaddr + IntrEnable);
1322
1323 netif_rx_schedule(dev);
1324#else
1325 rhine_rx(dev, RX_RING_SIZE);
1326#endif
1327 }
1273 1328
1274 if (intr_status & (IntrTxErrSummary | IntrTxDone)) { 1329 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1275 if (intr_status & IntrTxErrSummary) { 1330 if (intr_status & IntrTxErrSummary) {
@@ -1367,13 +1422,12 @@ static void rhine_tx(struct net_device *dev)
1367 spin_unlock(&rp->lock); 1422 spin_unlock(&rp->lock);
1368} 1423}
1369 1424
1370/* This routine is logically part of the interrupt handler, but isolated 1425/* Process up to limit frames from receive ring */
1371 for clarity and better register allocation. */ 1426static int rhine_rx(struct net_device *dev, int limit)
1372static void rhine_rx(struct net_device *dev)
1373{ 1427{
1374 struct rhine_private *rp = netdev_priv(dev); 1428 struct rhine_private *rp = netdev_priv(dev);
1429 int count;
1375 int entry = rp->cur_rx % RX_RING_SIZE; 1430 int entry = rp->cur_rx % RX_RING_SIZE;
1376 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1377 1431
1378 if (debug > 4) { 1432 if (debug > 4) {
1379 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", 1433 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
@@ -1382,16 +1436,18 @@ static void rhine_rx(struct net_device *dev)
1382 } 1436 }
1383 1437
1384 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1438 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1385 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) { 1439 for (count = 0; count < limit; ++count) {
1386 struct rx_desc *desc = rp->rx_head_desc; 1440 struct rx_desc *desc = rp->rx_head_desc;
1387 u32 desc_status = le32_to_cpu(desc->rx_status); 1441 u32 desc_status = le32_to_cpu(desc->rx_status);
1388 int data_size = desc_status >> 16; 1442 int data_size = desc_status >> 16;
1389 1443
1444 if (desc_status & DescOwn)
1445 break;
1446
1390 if (debug > 4) 1447 if (debug > 4)
1391 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", 1448 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1392 desc_status); 1449 desc_status);
1393 if (--boguscnt < 0) 1450
1394 break;
1395 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { 1451 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1396 if ((desc_status & RxWholePkt) != RxWholePkt) { 1452 if ((desc_status & RxWholePkt) != RxWholePkt) {
1397 printk(KERN_WARNING "%s: Oversized Ethernet " 1453 printk(KERN_WARNING "%s: Oversized Ethernet "
@@ -1460,7 +1516,11 @@ static void rhine_rx(struct net_device *dev)
1460 PCI_DMA_FROMDEVICE); 1516 PCI_DMA_FROMDEVICE);
1461 } 1517 }
1462 skb->protocol = eth_type_trans(skb, dev); 1518 skb->protocol = eth_type_trans(skb, dev);
1519#ifdef CONFIG_VIA_RHINE_NAPI
1520 netif_receive_skb(skb);
1521#else
1463 netif_rx(skb); 1522 netif_rx(skb);
1523#endif
1464 dev->last_rx = jiffies; 1524 dev->last_rx = jiffies;
1465 rp->stats.rx_bytes += pkt_len; 1525 rp->stats.rx_bytes += pkt_len;
1466 rp->stats.rx_packets++; 1526 rp->stats.rx_packets++;
@@ -1487,6 +1547,8 @@ static void rhine_rx(struct net_device *dev)
1487 } 1547 }
1488 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); 1548 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1489 } 1549 }
1550
1551 return count;
1490} 1552}
1491 1553
1492/* 1554/*
@@ -1617,9 +1679,6 @@ static void rhine_set_rx_mode(struct net_device *dev)
1617 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */ 1679 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1618 1680
1619 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1681 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1620 /* Unconditionally log net taps. */
1621 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1622 dev->name);
1623 rx_mode = 0x1C; 1682 rx_mode = 0x1C;
1624 iowrite32(0xffffffff, ioaddr + MulticastFilter0); 1683 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1625 iowrite32(0xffffffff, ioaddr + MulticastFilter1); 1684 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
@@ -1776,6 +1835,7 @@ static int rhine_close(struct net_device *dev)
1776 spin_lock_irq(&rp->lock); 1835 spin_lock_irq(&rp->lock);
1777 1836
1778 netif_stop_queue(dev); 1837 netif_stop_queue(dev);
1838 netif_poll_disable(dev);
1779 1839
1780 if (debug > 1) 1840 if (debug > 1)
1781 printk(KERN_DEBUG "%s: Shutting down ethercard, " 1841 printk(KERN_DEBUG "%s: Shutting down ethercard, "
@@ -1857,7 +1917,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
1857 } 1917 }
1858 1918
1859 /* Hit power state D3 (sleep) */ 1919 /* Hit power state D3 (sleep) */
1860 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); 1920 if (!avoid_D3)
1921 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1861 1922
1862 /* TODO: Check use of pci_enable_wake() */ 1923 /* TODO: Check use of pci_enable_wake() */
1863 1924
@@ -1941,7 +2002,7 @@ static int __init rhine_init(void)
1941#ifdef MODULE 2002#ifdef MODULE
1942 printk(version); 2003 printk(version);
1943#endif 2004#endif
1944 return pci_module_init(&rhine_driver); 2005 return pci_register_driver(&rhine_driver);
1945} 2006}
1946 2007
1947 2008
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index f5b0078eb4ad..f23d207ad3fc 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2109,8 +2109,6 @@ static void velocity_set_multi(struct net_device *dev)
2109 struct dev_mc_list *mclist; 2109 struct dev_mc_list *mclist;
2110 2110
2111 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 2111 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2112 /* Unconditionally log net taps. */
2113 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
2114 writel(0xffffffff, &regs->MARCAM[0]); 2112 writel(0xffffffff, &regs->MARCAM[0]);
2115 writel(0xffffffff, &regs->MARCAM[4]); 2113 writel(0xffffffff, &regs->MARCAM[4]);
2116 rx_mode = (RCR_AM | RCR_AB | RCR_PROM); 2114 rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
@@ -2250,7 +2248,7 @@ static int __init velocity_init_module(void)
2250 int ret; 2248 int ret;
2251 2249
2252 velocity_register_notifier(); 2250 velocity_register_notifier();
2253 ret = pci_module_init(&velocity_driver); 2251 ret = pci_register_driver(&velocity_driver);
2254 if (ret < 0) 2252 if (ret < 0)
2255 velocity_unregister_notifier(); 2253 velocity_unregister_notifier();
2256 return ret; 2254 return ret;
@@ -2742,7 +2740,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs)
2742 2740
2743 if (PHYSR0 & PHYSR0_SPDG) 2741 if (PHYSR0 & PHYSR0_SPDG)
2744 status |= VELOCITY_SPEED_1000; 2742 status |= VELOCITY_SPEED_1000;
2745 if (PHYSR0 & PHYSR0_SPD10) 2743 else if (PHYSR0 & PHYSR0_SPD10)
2746 status |= VELOCITY_SPEED_10; 2744 status |= VELOCITY_SPEED_10;
2747 else 2745 else
2748 status |= VELOCITY_SPEED_100; 2746 status |= VELOCITY_SPEED_100;
@@ -2851,8 +2849,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
2851 u32 status; 2849 u32 status;
2852 status = check_connection_type(vptr->mac_regs); 2850 status = check_connection_type(vptr->mac_regs);
2853 2851
2854 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; 2852 cmd->supported = SUPPORTED_TP |
2855 if (status & VELOCITY_SPEED_100) 2853 SUPPORTED_Autoneg |
2854 SUPPORTED_10baseT_Half |
2855 SUPPORTED_10baseT_Full |
2856 SUPPORTED_100baseT_Half |
2857 SUPPORTED_100baseT_Full |
2858 SUPPORTED_1000baseT_Half |
2859 SUPPORTED_1000baseT_Full;
2860 if (status & VELOCITY_SPEED_1000)
2861 cmd->speed = SPEED_1000;
2862 else if (status & VELOCITY_SPEED_100)
2856 cmd->speed = SPEED_100; 2863 cmd->speed = SPEED_100;
2857 else 2864 else
2858 cmd->speed = SPEED_10; 2865 cmd->speed = SPEED_10;
@@ -2896,7 +2903,7 @@ static u32 velocity_get_link(struct net_device *dev)
2896{ 2903{
2897 struct velocity_info *vptr = netdev_priv(dev); 2904 struct velocity_info *vptr = netdev_priv(dev);
2898 struct mac_regs __iomem * regs = vptr->mac_regs; 2905 struct mac_regs __iomem * regs = vptr->mac_regs;
2899 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1; 2906 return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2900} 2907}
2901 2908
2902static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2909static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 496c3d597444..82968e46d5df 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -29,7 +29,7 @@
29 29
30#define VELOCITY_NAME "via-velocity" 30#define VELOCITY_NAME "via-velocity"
31#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver" 31#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
32#define VELOCITY_VERSION "1.13" 32#define VELOCITY_VERSION "1.14"
33 33
34#define VELOCITY_IO_SIZE 256 34#define VELOCITY_IO_SIZE 256
35 35
@@ -262,25 +262,6 @@ struct velocity_rd_info {
262 dma_addr_t skb_dma; 262 dma_addr_t skb_dma;
263}; 263};
264 264
265/**
266 * alloc_rd_info - allocate an rd info block
267 *
268 * Alocate and initialize a receive info structure used for keeping
269 * track of kernel side information related to each receive
270 * descriptor we are using
271 */
272
273static inline struct velocity_rd_info *alloc_rd_info(void)
274{
275 struct velocity_rd_info *ptr;
276 if ((ptr = kmalloc(sizeof(struct velocity_rd_info), GFP_ATOMIC)) == NULL)
277 return NULL;
278 else {
279 memset(ptr, 0, sizeof(struct velocity_rd_info));
280 return ptr;
281 }
282}
283
284/* 265/*
285 * Used to track transmit side buffers. 266 * Used to track transmit side buffers.
286 */ 267 */
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 435e91ec4620..6b63b350cd52 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -118,7 +118,7 @@ static inline void openwin(card_t *card, u8 page)
118 118
119static inline void set_carrier(port_t *port) 119static inline void set_carrier(port_t *port)
120{ 120{
121 if (!sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD) 121 if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
122 netif_carrier_on(port_to_dev(port)); 122 netif_carrier_on(port_to_dev(port));
123 else 123 else
124 netif_carrier_off(port_to_dev(port)); 124 netif_carrier_off(port_to_dev(port));
@@ -127,10 +127,10 @@ static inline void set_carrier(port_t *port)
127 127
128static void sca_msci_intr(port_t *port) 128static void sca_msci_intr(port_t *port)
129{ 129{
130 u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */ 130 u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */
131 131
132 /* Reset MSCI TX underrun status bit */ 132 /* Reset MSCI TX underrun and CDCD (ignored) status bit */
133 sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port); 133 sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);
134 134
135 if (stat & ST1_UDRN) { 135 if (stat & ST1_UDRN) {
136 struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); 136 struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
@@ -138,6 +138,7 @@ static void sca_msci_intr(port_t *port)
138 stats->tx_fifo_errors++; 138 stats->tx_fifo_errors++;
139 } 139 }
140 140
141 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
141 /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ 142 /* Reset MSCI CDCD status bit - uses ch#2 DCD input */
142 sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); 143 sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
143 144
diff --git a/drivers/net/wan/cycx_main.c b/drivers/net/wan/cycx_main.c
index 430b1f630fb4..a5e7ce1bd16a 100644
--- a/drivers/net/wan/cycx_main.c
+++ b/drivers/net/wan/cycx_main.c
@@ -40,7 +40,6 @@
40* 1998/08/08 acme Initial version. 40* 1998/08/08 acme Initial version.
41*/ 41*/
42 42
43#include <linux/config.h> /* OS configuration options */
44#include <linux/stddef.h> /* offsetof(), etc. */ 43#include <linux/stddef.h> /* offsetof(), etc. */
45#include <linux/errno.h> /* return codes */ 44#include <linux/errno.h> /* return codes */
46#include <linux/string.h> /* inline memset(), etc. */ 45#include <linux/string.h> /* inline memset(), etc. */
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 6e1ec5bf22fc..736987559432 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -28,7 +28,6 @@
28 * 2 of the License, or (at your option) any later version. 28 * 2 of the License, or (at your option) any later version.
29 */ 29 */
30 30
31#include <linux/config.h> /* for CONFIG_DLCI_COUNT */
32#include <linux/module.h> 31#include <linux/module.h>
33#include <linux/kernel.h> 32#include <linux/kernel.h>
34#include <linux/types.h> 33#include <linux/types.h>
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 684af4316ffd..af4d4155905b 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2062,7 +2062,7 @@ static struct pci_driver dscc4_driver = {
2062 2062
2063static int __init dscc4_init_module(void) 2063static int __init dscc4_init_module(void)
2064{ 2064{
2065 return pci_module_init(&dscc4_driver); 2065 return pci_register_driver(&dscc4_driver);
2066} 2066}
2067 2067
2068static void __exit dscc4_cleanup_module(void) 2068static void __exit dscc4_cleanup_module(void)
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3705db04a343..564351aafa41 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2697,7 +2697,7 @@ fst_init(void)
2697 for (i = 0; i < FST_MAX_CARDS; i++) 2697 for (i = 0; i < FST_MAX_CARDS; i++)
2698 fst_card_array[i] = NULL; 2698 fst_card_array[i] = NULL;
2699 spin_lock_init(&fst_work_q_lock); 2699 spin_lock_init(&fst_work_q_lock);
2700 return pci_module_init(&fst_driver); 2700 return pci_register_driver(&fst_driver);
2701} 2701}
2702 2702
2703static void __exit 2703static void __exit
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 39f44241a728..7b5d81deb028 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1790,7 +1790,7 @@ static struct pci_driver lmc_driver = {
1790 1790
1791static int __init init_lmc(void) 1791static int __init init_lmc(void)
1792{ 1792{
1793 return pci_module_init(&lmc_driver); 1793 return pci_register_driver(&lmc_driver);
1794} 1794}
1795 1795
1796static void __exit exit_lmc(void) 1796static void __exit exit_lmc(void)
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 567effff4a3e..56e69403d178 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -3677,7 +3677,7 @@ static struct pci_driver cpc_driver = {
3677 3677
3678static int __init cpc_init(void) 3678static int __init cpc_init(void)
3679{ 3679{
3680 return pci_module_init(&cpc_driver); 3680 return pci_register_driver(&cpc_driver);
3681} 3681}
3682 3682
3683static void __exit cpc_cleanup_module(void) 3683static void __exit cpc_cleanup_module(void)
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index 4df61fa3214b..a6b9c33b68e4 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -476,7 +476,7 @@ static int __init pci200_init_module(void)
476 printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n"); 476 printk(KERN_ERR "pci200syn: Invalid PCI clock frequency\n");
477 return -EINVAL; 477 return -EINVAL;
478 } 478 }
479 return pci_module_init(&pci200_pci_driver); 479 return pci_register_driver(&pci200_pci_driver);
480} 480}
481 481
482 482
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 7628c2d81f45..0ba018f8382b 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -32,7 +32,6 @@
32 * 2 of the License, or (at your option) any later version. 32 * 2 of the License, or (at your option) any later version.
33 */ 33 */
34 34
35#include <linux/config.h> /* for CONFIG_DLCI_MAX */
36#include <linux/module.h> 35#include <linux/module.h>
37#include <linux/kernel.h> 36#include <linux/kernel.h>
38#include <linux/types.h> 37#include <linux/types.h>
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index b2031dfc4bb1..ec68f7dfd93f 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -837,7 +837,7 @@ static int __init wanxl_init_module(void)
837#ifdef MODULE 837#ifdef MODULE
838 printk(KERN_INFO "%s\n", version); 838 printk(KERN_INFO "%s\n", version);
839#endif 839#endif
840 return pci_module_init(&wanxl_pci_driver); 840 return pci_register_driver(&wanxl_pci_driver);
841} 841}
842 842
843static void __exit wanxl_cleanup_module(void) 843static void __exit wanxl_cleanup_module(void)
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 7caa8dc88a58..b1ba1872f315 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -500,8 +500,8 @@ MODULE_LICENSE("GPL");
500 500
501/* This is set up so that only a single autoprobe takes place per call. 501/* This is set up so that only a single autoprobe takes place per call.
502ISA device autoprobes on a running machine are not recommended. */ 502ISA device autoprobes on a running machine are not recommended. */
503int 503
504init_module(void) 504int __init init_module(void)
505{ 505{
506 struct net_device *dev; 506 struct net_device *dev;
507 int this_dev, found = 0; 507 int this_dev, found = 0;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fa9d2c4edc93..bd4a68c85a47 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -271,25 +271,14 @@ config IPW2200_DEBUG
271 bool "Enable full debugging output in IPW2200 module." 271 bool "Enable full debugging output in IPW2200 module."
272 depends on IPW2200 272 depends on IPW2200
273 ---help--- 273 ---help---
274 This option will enable debug tracing output for the IPW2200. 274 This option will enable low level debug tracing output for IPW2200.
275 275
276 This will result in the kernel module being ~100k larger. You can 276 Note, normal debug code is already compiled in. This low level
277 control which debug output is sent to the kernel log by setting the 277 debug option enables debug on hot paths (e.g. Tx, Rx, ISR) and
278 value in 278 will result in the kernel module being ~70k larger. Most users
279 279 will typically not need this high verbosity debug information.
280 /sys/bus/pci/drivers/ipw2200/debug_level
281
282 This entry will only exist if this option is enabled.
283 280
284 To set a value, simply echo an 8-byte hex value to the same file: 281 If you are not sure, say N here.
285
286 % echo 0x00000FFO > /sys/bus/pci/drivers/ipw2200/debug_level
287
288 You can find the list of debug mask values in
289 drivers/net/wireless/ipw2200.h
290
291 If you are not trying to debug or develop the IPW2200 driver, you
292 most likely want to say N here.
293 282
294config AIRO 283config AIRO
295 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 284 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
@@ -447,6 +436,7 @@ config AIRO_CS
447 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 436 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
448 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R) 437 depends on NET_RADIO && PCMCIA && (BROKEN || !M32R)
449 select CRYPTO 438 select CRYPTO
439 select CRYPTO_AES
450 ---help--- 440 ---help---
451 This is the standard Linux driver to support Cisco/Aironet PCMCIA 441 This is the standard Linux driver to support Cisco/Aironet PCMCIA
452 802.11 wireless cards. This driver is the same as the Aironet 442 802.11 wireless cards. This driver is the same as the Aironet
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a4dd13942714..e088ceefb4a3 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -47,6 +47,7 @@
47#include <linux/pci.h> 47#include <linux/pci.h>
48#include <asm/uaccess.h> 48#include <asm/uaccess.h>
49#include <net/ieee80211.h> 49#include <net/ieee80211.h>
50#include <linux/kthread.h>
50 51
51#include "airo.h" 52#include "airo.h"
52 53
@@ -1187,11 +1188,10 @@ struct airo_info {
1187 int whichbap); 1188 int whichbap);
1188 unsigned short *flash; 1189 unsigned short *flash;
1189 tdsRssiEntry *rssi; 1190 tdsRssiEntry *rssi;
1190 struct task_struct *task; 1191 struct task_struct *list_bss_task;
1192 struct task_struct *airo_thread_task;
1191 struct semaphore sem; 1193 struct semaphore sem;
1192 pid_t thr_pid;
1193 wait_queue_head_t thr_wait; 1194 wait_queue_head_t thr_wait;
1194 struct completion thr_exited;
1195 unsigned long expires; 1195 unsigned long expires;
1196 struct { 1196 struct {
1197 struct sk_buff *skb; 1197 struct sk_buff *skb;
@@ -1733,12 +1733,12 @@ static int readBSSListRid(struct airo_info *ai, int first,
1733 cmd.cmd=CMD_LISTBSS; 1733 cmd.cmd=CMD_LISTBSS;
1734 if (down_interruptible(&ai->sem)) 1734 if (down_interruptible(&ai->sem))
1735 return -ERESTARTSYS; 1735 return -ERESTARTSYS;
1736 ai->list_bss_task = current;
1736 issuecommand(ai, &cmd, &rsp); 1737 issuecommand(ai, &cmd, &rsp);
1737 up(&ai->sem); 1738 up(&ai->sem);
1738 /* Let the command take effect */ 1739 /* Let the command take effect */
1739 ai->task = current; 1740 schedule_timeout_uninterruptible(3 * HZ);
1740 ssleep(3); 1741 ai->list_bss_task = NULL;
1741 ai->task = NULL;
1742 } 1742 }
1743 rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext, 1743 rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
1744 list, ai->bssListRidLen, 1); 1744 list, ai->bssListRidLen, 1);
@@ -2400,8 +2400,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
2400 clear_bit(FLAG_REGISTERED, &ai->flags); 2400 clear_bit(FLAG_REGISTERED, &ai->flags);
2401 } 2401 }
2402 set_bit(JOB_DIE, &ai->jobs); 2402 set_bit(JOB_DIE, &ai->jobs);
2403 kill_proc(ai->thr_pid, SIGTERM, 1); 2403 kthread_stop(ai->airo_thread_task);
2404 wait_for_completion(&ai->thr_exited);
2405 2404
2406 /* 2405 /*
2407 * Clean out tx queue 2406 * Clean out tx queue
@@ -2811,9 +2810,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2811 ai->config.len = 0; 2810 ai->config.len = 0;
2812 ai->pci = pci; 2811 ai->pci = pci;
2813 init_waitqueue_head (&ai->thr_wait); 2812 init_waitqueue_head (&ai->thr_wait);
2814 init_completion (&ai->thr_exited); 2813 ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
2815 ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES); 2814 if (IS_ERR(ai->airo_thread_task))
2816 if (ai->thr_pid < 0)
2817 goto err_out_free; 2815 goto err_out_free;
2818 ai->tfm = NULL; 2816 ai->tfm = NULL;
2819 rc = add_airo_dev( dev ); 2817 rc = add_airo_dev( dev );
@@ -2930,8 +2928,7 @@ err_out_unlink:
2930 del_airo_dev(dev); 2928 del_airo_dev(dev);
2931err_out_thr: 2929err_out_thr:
2932 set_bit(JOB_DIE, &ai->jobs); 2930 set_bit(JOB_DIE, &ai->jobs);
2933 kill_proc(ai->thr_pid, SIGTERM, 1); 2931 kthread_stop(ai->airo_thread_task);
2934 wait_for_completion(&ai->thr_exited);
2935err_out_free: 2932err_out_free:
2936 free_netdev(dev); 2933 free_netdev(dev);
2937 return NULL; 2934 return NULL;
@@ -3063,13 +3060,7 @@ static int airo_thread(void *data) {
3063 struct airo_info *ai = dev->priv; 3060 struct airo_info *ai = dev->priv;
3064 int locked; 3061 int locked;
3065 3062
3066 daemonize("%s", dev->name);
3067 allow_signal(SIGTERM);
3068
3069 while(1) { 3063 while(1) {
3070 if (signal_pending(current))
3071 flush_signals(current);
3072
3073 /* make swsusp happy with our thread */ 3064 /* make swsusp happy with our thread */
3074 try_to_freeze(); 3065 try_to_freeze();
3075 3066
@@ -3097,7 +3088,7 @@ static int airo_thread(void *data) {
3097 set_bit(JOB_AUTOWEP, &ai->jobs); 3088 set_bit(JOB_AUTOWEP, &ai->jobs);
3098 break; 3089 break;
3099 } 3090 }
3100 if (!signal_pending(current)) { 3091 if (!kthread_should_stop()) {
3101 unsigned long wake_at; 3092 unsigned long wake_at;
3102 if (!ai->expires || !ai->scan_timeout) { 3093 if (!ai->expires || !ai->scan_timeout) {
3103 wake_at = max(ai->expires, 3094 wake_at = max(ai->expires,
@@ -3109,7 +3100,7 @@ static int airo_thread(void *data) {
3109 schedule_timeout(wake_at - jiffies); 3100 schedule_timeout(wake_at - jiffies);
3110 continue; 3101 continue;
3111 } 3102 }
3112 } else if (!signal_pending(current)) { 3103 } else if (!kthread_should_stop()) {
3113 schedule(); 3104 schedule();
3114 continue; 3105 continue;
3115 } 3106 }
@@ -3154,7 +3145,8 @@ static int airo_thread(void *data) {
3154 else /* Shouldn't get here, but we make sure to unlock */ 3145 else /* Shouldn't get here, but we make sure to unlock */
3155 up(&ai->sem); 3146 up(&ai->sem);
3156 } 3147 }
3157 complete_and_exit (&ai->thr_exited, 0); 3148
3149 return 0;
3158} 3150}
3159 3151
3160static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) { 3152static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) {
@@ -3235,8 +3227,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
3235 if(newStatus == ASSOCIATED || newStatus == REASSOCIATED) { 3227 if(newStatus == ASSOCIATED || newStatus == REASSOCIATED) {
3236 if (auto_wep) 3228 if (auto_wep)
3237 apriv->expires = 0; 3229 apriv->expires = 0;
3238 if (apriv->task) 3230 if (apriv->list_bss_task)
3239 wake_up_process (apriv->task); 3231 wake_up_process(apriv->list_bss_task);
3240 set_bit(FLAG_UPDATE_UNI, &apriv->flags); 3232 set_bit(FLAG_UPDATE_UNI, &apriv->flags);
3241 set_bit(FLAG_UPDATE_MULTI, &apriv->flags); 3233 set_bit(FLAG_UPDATE_MULTI, &apriv->flags);
3242 3234
@@ -3950,13 +3942,11 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
3950 pRsp->rsp0 = IN4500(ai, RESP0); 3942 pRsp->rsp0 = IN4500(ai, RESP0);
3951 pRsp->rsp1 = IN4500(ai, RESP1); 3943 pRsp->rsp1 = IN4500(ai, RESP1);
3952 pRsp->rsp2 = IN4500(ai, RESP2); 3944 pRsp->rsp2 = IN4500(ai, RESP2);
3953 if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) { 3945 if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET)
3954 airo_print_err(ai->dev->name, "cmd= %x\n", pCmd->cmd); 3946 airo_print_err(ai->dev->name,
3955 airo_print_err(ai->dev->name, "status= %x\n", pRsp->status); 3947 "cmd:%x status:%x rsp0:%x rsp1:%x rsp2:%x",
3956 airo_print_err(ai->dev->name, "Rsp0= %x\n", pRsp->rsp0); 3948 pCmd->cmd, pRsp->status, pRsp->rsp0, pRsp->rsp1,
3957 airo_print_err(ai->dev->name, "Rsp1= %x\n", pRsp->rsp1); 3949 pRsp->rsp2);
3958 airo_print_err(ai->dev->name, "Rsp2= %x\n", pRsp->rsp2);
3959 }
3960 3950
3961 // clear stuck command busy if necessary 3951 // clear stuck command busy if necessary
3962 if (IN4500(ai, COMMAND) & COMMAND_BUSY) { 3952 if (IN4500(ai, COMMAND) & COMMAND_BUSY) {
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index d425c3cefded..3bfa791c323d 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -76,7 +76,7 @@ static void __devexit atmel_pci_remove(struct pci_dev *pdev)
76 76
77static int __init atmel_init_module(void) 77static int __init atmel_init_module(void)
78{ 78{
79 return pci_module_init(&atmel_driver); 79 return pci_register_driver(&atmel_driver);
80} 80}
81 81
82static void __exit atmel_cleanup_module(void) 82static void __exit atmel_cleanup_module(void)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 17a56828e232..6d4ea36bc564 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -33,14 +33,18 @@
33#define BCM43xx_PCICFG_ICR 0x94 33#define BCM43xx_PCICFG_ICR 0x94
34 34
35/* MMIO offsets */ 35/* MMIO offsets */
36#define BCM43xx_MMIO_DMA1_REASON 0x20 36#define BCM43xx_MMIO_DMA0_REASON 0x20
37#define BCM43xx_MMIO_DMA1_IRQ_MASK 0x24 37#define BCM43xx_MMIO_DMA0_IRQ_MASK 0x24
38#define BCM43xx_MMIO_DMA2_REASON 0x28 38#define BCM43xx_MMIO_DMA1_REASON 0x28
39#define BCM43xx_MMIO_DMA2_IRQ_MASK 0x2C 39#define BCM43xx_MMIO_DMA1_IRQ_MASK 0x2C
40#define BCM43xx_MMIO_DMA3_REASON 0x30 40#define BCM43xx_MMIO_DMA2_REASON 0x30
41#define BCM43xx_MMIO_DMA3_IRQ_MASK 0x34 41#define BCM43xx_MMIO_DMA2_IRQ_MASK 0x34
42#define BCM43xx_MMIO_DMA4_REASON 0x38 42#define BCM43xx_MMIO_DMA3_REASON 0x38
43#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x3C 43#define BCM43xx_MMIO_DMA3_IRQ_MASK 0x3C
44#define BCM43xx_MMIO_DMA4_REASON 0x40
45#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x44
46#define BCM43xx_MMIO_DMA5_REASON 0x48
47#define BCM43xx_MMIO_DMA5_IRQ_MASK 0x4C
44#define BCM43xx_MMIO_STATUS_BITFIELD 0x120 48#define BCM43xx_MMIO_STATUS_BITFIELD 0x120
45#define BCM43xx_MMIO_STATUS2_BITFIELD 0x124 49#define BCM43xx_MMIO_STATUS2_BITFIELD 0x124
46#define BCM43xx_MMIO_GEN_IRQ_REASON 0x128 50#define BCM43xx_MMIO_GEN_IRQ_REASON 0x128
@@ -56,14 +60,27 @@
56#define BCM43xx_MMIO_XMITSTAT_1 0x174 60#define BCM43xx_MMIO_XMITSTAT_1 0x174
57#define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ 61#define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */
58#define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ 62#define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */
59#define BCM43xx_MMIO_DMA1_BASE 0x200 63
60#define BCM43xx_MMIO_DMA2_BASE 0x220 64/* 32-bit DMA */
61#define BCM43xx_MMIO_DMA3_BASE 0x240 65#define BCM43xx_MMIO_DMA32_BASE0 0x200
62#define BCM43xx_MMIO_DMA4_BASE 0x260 66#define BCM43xx_MMIO_DMA32_BASE1 0x220
67#define BCM43xx_MMIO_DMA32_BASE2 0x240
68#define BCM43xx_MMIO_DMA32_BASE3 0x260
69#define BCM43xx_MMIO_DMA32_BASE4 0x280
70#define BCM43xx_MMIO_DMA32_BASE5 0x2A0
71/* 64-bit DMA */
72#define BCM43xx_MMIO_DMA64_BASE0 0x200
73#define BCM43xx_MMIO_DMA64_BASE1 0x240
74#define BCM43xx_MMIO_DMA64_BASE2 0x280
75#define BCM43xx_MMIO_DMA64_BASE3 0x2C0
76#define BCM43xx_MMIO_DMA64_BASE4 0x300
77#define BCM43xx_MMIO_DMA64_BASE5 0x340
78/* PIO */
63#define BCM43xx_MMIO_PIO1_BASE 0x300 79#define BCM43xx_MMIO_PIO1_BASE 0x300
64#define BCM43xx_MMIO_PIO2_BASE 0x310 80#define BCM43xx_MMIO_PIO2_BASE 0x310
65#define BCM43xx_MMIO_PIO3_BASE 0x320 81#define BCM43xx_MMIO_PIO3_BASE 0x320
66#define BCM43xx_MMIO_PIO4_BASE 0x330 82#define BCM43xx_MMIO_PIO4_BASE 0x330
83
67#define BCM43xx_MMIO_PHY_VER 0x3E0 84#define BCM43xx_MMIO_PHY_VER 0x3E0
68#define BCM43xx_MMIO_PHY_RADIO 0x3E2 85#define BCM43xx_MMIO_PHY_RADIO 0x3E2
69#define BCM43xx_MMIO_ANTENNA 0x3E8 86#define BCM43xx_MMIO_ANTENNA 0x3E8
@@ -233,8 +250,14 @@
233#define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000 250#define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000
234 251
235/* sbtmstatehigh state flags */ 252/* sbtmstatehigh state flags */
236#define BCM43xx_SBTMSTATEHIGH_SERROR 0x1 253#define BCM43xx_SBTMSTATEHIGH_SERROR 0x00000001
237#define BCM43xx_SBTMSTATEHIGH_BUSY 0x4 254#define BCM43xx_SBTMSTATEHIGH_BUSY 0x00000004
255#define BCM43xx_SBTMSTATEHIGH_TIMEOUT 0x00000020
256#define BCM43xx_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000
257#define BCM43xx_SBTMSTATEHIGH_DMA64BIT 0x10000000
258#define BCM43xx_SBTMSTATEHIGH_GATEDCLK 0x20000000
259#define BCM43xx_SBTMSTATEHIGH_BISTFAILED 0x40000000
260#define BCM43xx_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000
238 261
239/* sbimstate flags */ 262/* sbimstate flags */
240#define BCM43xx_SBIMSTATE_IB_ERROR 0x20000 263#define BCM43xx_SBIMSTATE_IB_ERROR 0x20000
@@ -283,6 +306,13 @@
283#define BCM43xx_SBF_TIME_UPDATE 0x10000000 306#define BCM43xx_SBF_TIME_UPDATE 0x10000000
284#define BCM43xx_SBF_80000000 0x80000000 /*FIXME: fix name*/ 307#define BCM43xx_SBF_80000000 0x80000000 /*FIXME: fix name*/
285 308
309/* Microcode */
310#define BCM43xx_UCODE_REVISION 0x0000
311#define BCM43xx_UCODE_PATCHLEVEL 0x0002
312#define BCM43xx_UCODE_DATE 0x0004
313#define BCM43xx_UCODE_TIME 0x0006
314#define BCM43xx_UCODE_STATUS 0x0040
315
286/* MicrocodeFlagsBitfield (addr + lo-word values?)*/ 316/* MicrocodeFlagsBitfield (addr + lo-word values?)*/
287#define BCM43xx_UCODEFLAGS_OFFSET 0x005E 317#define BCM43xx_UCODEFLAGS_OFFSET 0x005E
288 318
@@ -504,6 +534,12 @@ struct bcm43xx_phyinfo {
504 * This lock is only used by bcm43xx_phy_{un}lock() 534 * This lock is only used by bcm43xx_phy_{un}lock()
505 */ 535 */
506 spinlock_t lock; 536 spinlock_t lock;
537
538 /* Firmware. */
539 const struct firmware *ucode;
540 const struct firmware *pcm;
541 const struct firmware *initvals0;
542 const struct firmware *initvals1;
507}; 543};
508 544
509 545
@@ -568,8 +604,11 @@ struct bcm43xx_dma {
568 struct bcm43xx_dmaring *tx_ring1; 604 struct bcm43xx_dmaring *tx_ring1;
569 struct bcm43xx_dmaring *tx_ring2; 605 struct bcm43xx_dmaring *tx_ring2;
570 struct bcm43xx_dmaring *tx_ring3; 606 struct bcm43xx_dmaring *tx_ring3;
607 struct bcm43xx_dmaring *tx_ring4;
608 struct bcm43xx_dmaring *tx_ring5;
609
571 struct bcm43xx_dmaring *rx_ring0; 610 struct bcm43xx_dmaring *rx_ring0;
572 struct bcm43xx_dmaring *rx_ring1; /* only available on core.rev < 5 */ 611 struct bcm43xx_dmaring *rx_ring3; /* only available on core.rev < 5 */
573}; 612};
574 613
575/* Data structures for PIO transmission, per 80211 core. */ 614/* Data structures for PIO transmission, per 80211 core. */
@@ -593,12 +632,14 @@ struct bcm43xx_coreinfo {
593 u8 available:1, 632 u8 available:1,
594 enabled:1, 633 enabled:1,
595 initialized:1; 634 initialized:1;
596 /** core_id ID number */
597 u16 id;
598 /** core_rev revision number */ 635 /** core_rev revision number */
599 u8 rev; 636 u8 rev;
600 /** Index number for _switch_core() */ 637 /** Index number for _switch_core() */
601 u8 index; 638 u8 index;
639 /** core_id ID number */
640 u16 id;
641 /** Core-specific data. */
642 void *priv;
602}; 643};
603 644
604/* Additional information for each 80211 core. */ 645/* Additional information for each 80211 core. */
@@ -647,7 +688,23 @@ enum {
647 BCM43xx_STAT_RESTARTING, /* controller_restart() called. */ 688 BCM43xx_STAT_RESTARTING, /* controller_restart() called. */
648}; 689};
649#define bcm43xx_status(bcm) atomic_read(&(bcm)->init_status) 690#define bcm43xx_status(bcm) atomic_read(&(bcm)->init_status)
650#define bcm43xx_set_status(bcm, stat) atomic_set(&(bcm)->init_status, (stat)) 691#define bcm43xx_set_status(bcm, stat) do { \
692 atomic_set(&(bcm)->init_status, (stat)); \
693 smp_wmb(); \
694 } while (0)
695
696/* *** THEORY OF LOCKING ***
697 *
698 * We have two different locks in the bcm43xx driver.
699 * => bcm->mutex: General sleeping mutex. Protects struct bcm43xx_private
700 * and the device registers. This mutex does _not_ protect
701 * against concurrency from the IRQ handler.
702 * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency.
703 *
704 * Please note that, if you only take the irq_lock, you are not protected
705 * against concurrency from the periodic work handlers.
706 * Most times you want to take _both_ locks.
707 */
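For illustration only (not part of this patch): a hedged sketch of the "take both locks" rule stated above, with a hypothetical example_reconfig() caller; the two lock members are the ones in struct bcm43xx_private below.

	static void example_reconfig(struct bcm43xx_private *bcm)
	{
		unsigned long flags;

		mutex_lock(&bcm->mutex);	/* vs. other sleeping paths */
		spin_lock_irqsave(&bcm->irq_lock, flags); /* vs. the IRQ handler */

		/* ... touch bcm43xx_private state and device registers ... */

		spin_unlock_irqrestore(&bcm->irq_lock, flags);
		mutex_unlock(&bcm->mutex);
	}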
651 708
652struct bcm43xx_private { 709struct bcm43xx_private {
653 struct ieee80211_device *ieee; 710 struct ieee80211_device *ieee;
@@ -659,7 +716,6 @@ struct bcm43xx_private {
659 716
660 void __iomem *mmio_addr; 717 void __iomem *mmio_addr;
661 718
662 /* Locking, see "theory of locking" text below. */
663 spinlock_t irq_lock; 719 spinlock_t irq_lock;
664 struct mutex mutex; 720 struct mutex mutex;
665 721
@@ -691,6 +747,7 @@ struct bcm43xx_private {
691 struct bcm43xx_sprominfo sprom; 747 struct bcm43xx_sprominfo sprom;
692#define BCM43xx_NR_LEDS 4 748#define BCM43xx_NR_LEDS 4
693 struct bcm43xx_led leds[BCM43xx_NR_LEDS]; 749 struct bcm43xx_led leds[BCM43xx_NR_LEDS];
750 spinlock_t leds_lock;
694 751
695 /* The currently active core. */ 752 /* The currently active core. */
696 struct bcm43xx_coreinfo *current_core; 753 struct bcm43xx_coreinfo *current_core;
@@ -708,10 +765,6 @@ struct bcm43xx_private {
708 struct bcm43xx_coreinfo core_80211[ BCM43xx_MAX_80211_CORES ]; 765 struct bcm43xx_coreinfo core_80211[ BCM43xx_MAX_80211_CORES ];
709 /* Additional information, specific to the 80211 cores. */ 766 /* Additional information, specific to the 80211 cores. */
710 struct bcm43xx_coreinfo_80211 core_80211_ext[ BCM43xx_MAX_80211_CORES ]; 767 struct bcm43xx_coreinfo_80211 core_80211_ext[ BCM43xx_MAX_80211_CORES ];
711 /* Index of the current 80211 core. If current_core is not
712 * an 80211 core, this is -1.
713 */
714 int current_80211_core_idx;
715 /* Number of available 80211 cores. */ 768 /* Number of available 80211 cores. */
716 int nr_80211_available; 769 int nr_80211_available;
717 770
@@ -719,11 +772,13 @@ struct bcm43xx_private {
 
 	/* Reason code of the last interrupt. */
 	u32 irq_reason;
-	u32 dma_reason[4];
+	u32 dma_reason[6];
 	/* saved irq enable/disable state bitfield. */
 	u32 irq_savedstate;
 	/* Link Quality calculation context. */
 	struct bcm43xx_noise_calculation noisecalc;
+	/* if > 0 MAC is suspended. if == 0 MAC is enabled. */
+	int mac_suspended;
 
 	/* Threshold values. */
 	//TODO: The RTS thr has to be _used_. Currently, it is only set via WX.
@@ -746,12 +801,6 @@ struct bcm43xx_private {
 	struct bcm43xx_key key[54];
 	u8 default_key_idx;
 
-	/* Firmware. */
-	const struct firmware *ucode;
-	const struct firmware *pcm;
-	const struct firmware *initvals0;
-	const struct firmware *initvals1;
-
 	/* Random Number Generator. */
 	struct hwrng rng;
 	char rng_name[20 + 1];
@@ -763,55 +812,6 @@ struct bcm43xx_private {
 };
 
 
-/* *** THEORY OF LOCKING ***
- *
- * We have two different locks in the bcm43xx driver.
- * => bcm->mutex:    General sleeping mutex. Protects struct bcm43xx_private
- *                   and the device registers.
- * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency.
- *
- * We have three types of helper function pairs to utilize these locks.
- *     (Always use the helper functions.)
- * 1) bcm43xx_{un}lock_noirq():
- *     Takes bcm->mutex. Does _not_ protect against IRQ concurrency,
- *     so it is almost always unsafe, if device IRQs are enabled.
- *     So only use this, if device IRQs are masked.
- *     Locking may sleep.
- *     You can sleep within the critical section.
- * 2) bcm43xx_{un}lock_irqonly():
- *     Takes bcm->irq_lock. Does _not_ protect against
- *     bcm43xx_lock_noirq() critical sections.
- *     Does only protect against the IRQ handler path and other
- *     irqonly() critical sections.
- *     Locking does not sleep.
- *     You must not sleep within the critical section.
- * 3) bcm43xx_{un}lock_irqsafe():
- *     This is the cummulative lock and takes both, mutex and irq_lock.
- *     Protects against noirq() and irqonly() critical sections (and
- *     the IRQ handler path).
- *     Locking may sleep.
- *     You must not sleep within the critical section.
- */
-
-/* Lock type 1 */
-#define bcm43xx_lock_noirq(bcm)		mutex_lock(&(bcm)->mutex)
-#define bcm43xx_unlock_noirq(bcm)	mutex_unlock(&(bcm)->mutex)
-/* Lock type 2 */
-#define bcm43xx_lock_irqonly(bcm, flags)	\
-	spin_lock_irqsave(&(bcm)->irq_lock, flags)
-#define bcm43xx_unlock_irqonly(bcm, flags)	\
-	spin_unlock_irqrestore(&(bcm)->irq_lock, flags)
-/* Lock type 3 */
-#define bcm43xx_lock_irqsafe(bcm, flags) do {	\
-	bcm43xx_lock_noirq(bcm);		\
-	bcm43xx_lock_irqonly(bcm, flags);	\
-	} while (0)
-#define bcm43xx_unlock_irqsafe(bcm, flags) do {	\
-	bcm43xx_unlock_irqonly(bcm, flags);	\
-	bcm43xx_unlock_noirq(bcm);		\
-	} while (0)
-
-
 static inline
 struct bcm43xx_private * bcm43xx_priv(struct net_device *dev)
 {
@@ -863,34 +863,33 @@ int bcm43xx_using_pio(struct bcm43xx_private *bcm)
  * any of these functions.
  */
 static inline
+struct bcm43xx_coreinfo_80211 *
+bcm43xx_current_80211_priv(struct bcm43xx_private *bcm)
+{
+	assert(bcm->current_core->id == BCM43xx_COREID_80211);
+	return bcm->current_core->priv;
+}
+static inline
 struct bcm43xx_pio * bcm43xx_current_pio(struct bcm43xx_private *bcm)
 {
 	assert(bcm43xx_using_pio(bcm));
-	assert(bcm->current_80211_core_idx >= 0);
-	assert(bcm->current_80211_core_idx < BCM43xx_MAX_80211_CORES);
-	return &(bcm->core_80211_ext[bcm->current_80211_core_idx].pio);
+	return &(bcm43xx_current_80211_priv(bcm)->pio);
 }
 static inline
 struct bcm43xx_dma * bcm43xx_current_dma(struct bcm43xx_private *bcm)
 {
 	assert(!bcm43xx_using_pio(bcm));
-	assert(bcm->current_80211_core_idx >= 0);
-	assert(bcm->current_80211_core_idx < BCM43xx_MAX_80211_CORES);
-	return &(bcm->core_80211_ext[bcm->current_80211_core_idx].dma);
+	return &(bcm43xx_current_80211_priv(bcm)->dma);
 }
 static inline
 struct bcm43xx_phyinfo * bcm43xx_current_phy(struct bcm43xx_private *bcm)
 {
-	assert(bcm->current_80211_core_idx >= 0);
-	assert(bcm->current_80211_core_idx < BCM43xx_MAX_80211_CORES);
-	return &(bcm->core_80211_ext[bcm->current_80211_core_idx].phy);
+	return &(bcm43xx_current_80211_priv(bcm)->phy);
 }
 static inline
 struct bcm43xx_radioinfo * bcm43xx_current_radio(struct bcm43xx_private *bcm)
 {
-	assert(bcm->current_80211_core_idx >= 0);
-	assert(bcm->current_80211_core_idx < BCM43xx_MAX_80211_CORES);
-	return &(bcm->core_80211_ext[bcm->current_80211_core_idx].radio);
+	return &(bcm43xx_current_80211_priv(bcm)->radio);
 }
 
 
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
index ce2e40b29b4f..923275ea0789 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
@@ -77,7 +77,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
 
 	down(&big_buffer_sem);
 
-	bcm43xx_lock_irqsafe(bcm, flags);
+	mutex_lock(&bcm->mutex);
+	spin_lock_irqsave(&bcm->irq_lock, flags);
 	if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
 		fappend("Board not initialized.\n");
 		goto out;
@@ -121,7 +122,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
 	fappend("\n");
 
 out:
-	bcm43xx_unlock_irqsafe(bcm, flags);
+	spin_unlock_irqrestore(&bcm->irq_lock, flags);
+	mutex_unlock(&bcm->mutex);
 	res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
 	up(&big_buffer_sem);
 	return res;
@@ -159,7 +161,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
 	unsigned long flags;
 
 	down(&big_buffer_sem);
-	bcm43xx_lock_irqsafe(bcm, flags);
+	mutex_lock(&bcm->mutex);
+	spin_lock_irqsave(&bcm->irq_lock, flags);
 	if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
 		fappend("Board not initialized.\n");
 		goto out;
@@ -169,7 +172,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
 	fappend("boardflags: 0x%04x\n", bcm->sprom.boardflags);
 
 out:
-	bcm43xx_unlock_irqsafe(bcm, flags);
+	spin_unlock_irqrestore(&bcm->irq_lock, flags);
+	mutex_unlock(&bcm->mutex);
 	res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
 	up(&big_buffer_sem);
 	return res;
@@ -188,7 +192,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
 	u64 tsf;
 
 	down(&big_buffer_sem);
-	bcm43xx_lock_irqsafe(bcm, flags);
+	mutex_lock(&bcm->mutex);
+	spin_lock_irqsave(&bcm->irq_lock, flags);
 	if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
 		fappend("Board not initialized.\n");
 		goto out;
@@ -199,7 +204,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
 		(unsigned int)(tsf & 0xFFFFFFFFULL));
 
 out:
-	bcm43xx_unlock_irqsafe(bcm, flags);
+	spin_unlock_irqrestore(&bcm->irq_lock, flags);
+	mutex_unlock(&bcm->mutex);
 	res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
 	up(&big_buffer_sem);
 	return res;
@@ -221,7 +227,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
 		res = -EFAULT;
 		goto out_up;
 	}
-	bcm43xx_lock_irqsafe(bcm, flags);
+	mutex_lock(&bcm->mutex);
+	spin_lock_irqsave(&bcm->irq_lock, flags);
 	if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
 		printk(KERN_INFO PFX "debugfs: Board not initialized.\n");
 		res = -EFAULT;
@@ -237,7 +244,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
 	res = buf_size;
 
 out_unlock:
-	bcm43xx_unlock_irqsafe(bcm, flags);
+	spin_unlock_irqrestore(&bcm->irq_lock, flags);
+	mutex_unlock(&bcm->mutex);
 out_up:
 	up(&big_buffer_sem);
 	return res;
@@ -258,7 +266,8 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
 	int i, cnt, j = 0;
 
 	down(&big_buffer_sem);
-	bcm43xx_lock_irqsafe(bcm, flags);
+	mutex_lock(&bcm->mutex);
+	spin_lock_irqsave(&bcm->irq_lock, flags);
 
 	fappend("Last %d logged xmitstatus blobs (Latest first):\n\n",
 		BCM43xx_NR_LOGGED_XMITSTATUS);
@@ -294,14 +303,51 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
 		i = BCM43xx_NR_LOGGED_XMITSTATUS - 1;
 	}
 
-	bcm43xx_unlock_irqsafe(bcm, flags);
+	spin_unlock_irqrestore(&bcm->irq_lock, flags);
 	res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
-	bcm43xx_lock_irqsafe(bcm, flags);
+	spin_lock_irqsave(&bcm->irq_lock, flags);
 	if (*ppos == pos) {
 		/* Done. Drop the copied data. */
 		e->xmitstatus_printing = 0;
 	}
-	bcm43xx_unlock_irqsafe(bcm, flags);
+	spin_unlock_irqrestore(&bcm->irq_lock, flags);
+	mutex_unlock(&bcm->mutex);
+	up(&big_buffer_sem);
+	return res;
+}
+
+static ssize_t restart_write_file(struct file *file, const char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct bcm43xx_private *bcm = file->private_data;
+	char *buf = really_big_buffer;
+	ssize_t buf_size;
+	ssize_t res;
+	unsigned long flags;
+
+	buf_size = min(count, sizeof (really_big_buffer) - 1);
+	down(&big_buffer_sem);
+	if (copy_from_user(buf, user_buf, buf_size)) {
+		res = -EFAULT;
+		goto out_up;
+	}
+	mutex_lock(&(bcm)->mutex);
+	spin_lock_irqsave(&(bcm)->irq_lock, flags);
+	if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
+		printk(KERN_INFO PFX "debugfs: Board not initialized.\n");
+		res = -EFAULT;
+		goto out_unlock;
+	}
+	if (count > 0 && buf[0] == '1') {
+		bcm43xx_controller_restart(bcm, "manually restarted");
+		res = count;
+	} else
+		res = -EINVAL;
+
+out_unlock:
+	spin_unlock_irqrestore(&(bcm)->irq_lock, flags);
+	mutex_unlock(&(bcm)->mutex);
+out_up:
 	up(&big_buffer_sem);
 	return res;
 }
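
Usage note: with this file in place, the controller can be restarted from
userspace by writing '1' to the new debugfs node, e.g.
"echo 1 > /sys/kernel/debug/<driver dir>/<device>/restart" (the exact path
depends on where debugfs is mounted and on the device directory name, so
treat it as illustrative). Any other first character yields -EINVAL, and a
write to an uninitialized board yields -EFAULT.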
@@ -339,6 +385,11 @@ static struct file_operations txstat_fops = {
 	.open	= open_file_generic,
 };
 
+static struct file_operations restart_fops = {
+	.write	= restart_write_file,
+	.open	= open_file_generic,
+};
+
 
 void bcm43xx_debugfs_add_device(struct bcm43xx_private *bcm)
 {
@@ -390,6 +441,10 @@ void bcm43xx_debugfs_add_device(struct bcm43xx_private *bcm)
 						bcm, &txstat_fops);
 	if (!e->dentry_txstat)
 		printk(KERN_ERR PFX "debugfs: creating \"tx_status\" for \"%s\" failed!\n", devdir);
+	e->dentry_restart = debugfs_create_file("restart", 0222, e->subdir,
+						bcm, &restart_fops);
+	if (!e->dentry_restart)
+		printk(KERN_ERR PFX "debugfs: creating \"restart\" for \"%s\" failed!\n", devdir);
 }
 
 void bcm43xx_debugfs_remove_device(struct bcm43xx_private *bcm)
@@ -405,6 +460,7 @@ void bcm43xx_debugfs_remove_device(struct bcm43xx_private *bcm)
 		debugfs_remove(e->dentry_devinfo);
 		debugfs_remove(e->dentry_tsf);
 		debugfs_remove(e->dentry_txstat);
+		debugfs_remove(e->dentry_restart);
 		debugfs_remove(e->subdir);
 		kfree(e->xmitstatus_buffer);
 		kfree(e->xmitstatus_print_buffer);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h
index 50ce267f794d..a40d1af35545 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h
@@ -20,6 +20,7 @@ struct bcm43xx_dfsentry {
 	struct dentry *dentry_spromdump;
 	struct dentry *dentry_tsf;
 	struct dentry *dentry_txstat;
+	struct dentry *dentry_restart;
 
 	struct bcm43xx_private *bcm;
 
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index d0318e525ba7..76e3aed4b471 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -4,7 +4,7 @@
 
   DMA ringbuffer and descriptor allocation/management
 
-  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
+  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>
 
   Some code in this file is derived from the b44.c driver
   Copyright (C) 2002 David S. Miller
@@ -109,6 +109,35 @@ void return_slot(struct bcm43xx_dmaring *ring, int slot)
 	}
 }
 
+u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
+{
+	static const u16 map64[] = {
+		BCM43xx_MMIO_DMA64_BASE0,
+		BCM43xx_MMIO_DMA64_BASE1,
+		BCM43xx_MMIO_DMA64_BASE2,
+		BCM43xx_MMIO_DMA64_BASE3,
+		BCM43xx_MMIO_DMA64_BASE4,
+		BCM43xx_MMIO_DMA64_BASE5,
+	};
+	static const u16 map32[] = {
+		BCM43xx_MMIO_DMA32_BASE0,
+		BCM43xx_MMIO_DMA32_BASE1,
+		BCM43xx_MMIO_DMA32_BASE2,
+		BCM43xx_MMIO_DMA32_BASE3,
+		BCM43xx_MMIO_DMA32_BASE4,
+		BCM43xx_MMIO_DMA32_BASE5,
+	};
+
+	if (dma64bit) {
+		assert(controller_idx >= 0 &&
+		       controller_idx < ARRAY_SIZE(map64));
+		return map64[controller_idx];
+	}
+	assert(controller_idx >= 0 &&
+	       controller_idx < ARRAY_SIZE(map32));
+	return map32[controller_idx];
+}
+
 static inline
 dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
 			  unsigned char *buf,
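
A brief usage sketch for the new helper (the BCM43xx_MMIO_DMA*_BASE*
constants are assumed to come from bcm43xx.h, which is not part of this
hunk):

	u16 base;

	base = bcm43xx_dmacontroller_base(1, 0);  /* 64-bit engine, controller 0 */
	/* base == BCM43xx_MMIO_DMA64_BASE0; an out-of-range index trips assert() */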
@@ -172,7 +201,6 @@ void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
 /* Unmap and free a descriptor buffer. */
 static inline
 void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
-			    struct bcm43xx_dmadesc *desc,
 			    struct bcm43xx_dmadesc_meta *meta,
 			    int irq_context)
 {
@@ -188,23 +216,13 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
 {
 	struct device *dev = &(ring->bcm->pci_dev->dev);
 
-	ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-					 &(ring->dmabase), GFP_KERNEL);
-	if (!ring->vbase) {
+	ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
+					    &(ring->dmabase), GFP_KERNEL);
+	if (!ring->descbase) {
 		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
 		return -ENOMEM;
 	}
-	if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
-		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
-				    "(0x%llx, len: %lu)\n",
-				(unsigned long long)ring->dmabase,
-				BCM43xx_DMA_RINGMEMSIZE);
-		dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-				  ring->vbase, ring->dmabase);
-		return -ENOMEM;
-	}
-	assert(!(ring->dmabase & 0x000003FF));
-	memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);
+	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
 
 	return 0;
 }
@@ -214,26 +232,34 @@ static void free_ringmemory(struct bcm43xx_dmaring *ring)
 	struct device *dev = &(ring->bcm->pci_dev->dev);
 
 	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-			  ring->vbase, ring->dmabase);
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
-				   u16 mmio_base)
+				   u16 mmio_base, int dma64)
 {
 	int i;
 	u32 value;
+	u16 offset;
 
-	bcm43xx_write32(bcm,
-			mmio_base + BCM43xx_DMA_RX_CONTROL,
-			0x00000000);
+	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
+	bcm43xx_write32(bcm, mmio_base + offset, 0);
 	for (i = 0; i < 1000; i++) {
-		value = bcm43xx_read32(bcm,
-				       mmio_base + BCM43xx_DMA_RX_STATUS);
-		value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
-		if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
-			i = -1;
-			break;
+		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
+		value = bcm43xx_read32(bcm, mmio_base + offset);
+		if (dma64) {
+			value &= BCM43xx_DMA64_RXSTAT;
+			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
+				i = -1;
+				break;
+			}
+		} else {
+			value &= BCM43xx_DMA32_RXSTATE;
+			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
+				i = -1;
+				break;
+			}
 		}
 		udelay(10);
 	}
@@ -247,31 +273,47 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
 
 /* Reset the RX DMA channel */
 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
-				   u16 mmio_base)
+				   u16 mmio_base, int dma64)
 {
 	int i;
 	u32 value;
+	u16 offset;
 
 	for (i = 0; i < 1000; i++) {
-		value = bcm43xx_read32(bcm,
-				       mmio_base + BCM43xx_DMA_TX_STATUS);
-		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
-		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
-		    value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
-		    value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
-			break;
+		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
+		value = bcm43xx_read32(bcm, mmio_base + offset);
+		if (dma64) {
+			value &= BCM43xx_DMA64_TXSTAT;
+			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
+			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
+			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
+				break;
+		} else {
+			value &= BCM43xx_DMA32_TXSTATE;
+			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
+			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
+			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
+				break;
+		}
 		udelay(10);
 	}
-	bcm43xx_write32(bcm,
-			mmio_base + BCM43xx_DMA_TX_CONTROL,
-			0x00000000);
+	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
+	bcm43xx_write32(bcm, mmio_base + offset, 0);
 	for (i = 0; i < 1000; i++) {
-		value = bcm43xx_read32(bcm,
-				       mmio_base + BCM43xx_DMA_TX_STATUS);
-		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
-		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
-			i = -1;
-			break;
+		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
+		value = bcm43xx_read32(bcm, mmio_base + offset);
+		if (dma64) {
+			value &= BCM43xx_DMA64_TXSTAT;
+			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
+				i = -1;
+				break;
+			}
+		} else {
+			value &= BCM43xx_DMA32_TXSTATE;
+			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
+				i = -1;
+				break;
+			}
 		}
 		udelay(10);
 	}
@@ -285,47 +327,98 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
 	return 0;
 }
 
+static void fill_descriptor(struct bcm43xx_dmaring *ring,
+			    struct bcm43xx_dmadesc_generic *desc,
+			    dma_addr_t dmaaddr,
+			    u16 bufsize,
+			    int start, int end, int irq)
+{
+	int slot;
+
+	slot = bcm43xx_dma_desc2idx(ring, desc);
+	assert(slot >= 0 && slot < ring->nr_slots);
+
+	if (ring->dma64) {
+		u32 ctl0 = 0, ctl1 = 0;
+		u32 addrlo, addrhi;
+		u32 addrext;
+
+		addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
+		addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
+		addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+		addrhi |= ring->routing;
+		if (slot == ring->nr_slots - 1)
+			ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
+		if (start)
+			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
+		if (end)
+			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
+		if (irq)
+			ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
+		ctl1 |= (bufsize - ring->frameoffset)
+			& BCM43xx_DMA64_DCTL1_BYTECNT;
+		ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
+			& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
+
+		desc->dma64.control0 = cpu_to_le32(ctl0);
+		desc->dma64.control1 = cpu_to_le32(ctl1);
+		desc->dma64.address_low = cpu_to_le32(addrlo);
+		desc->dma64.address_high = cpu_to_le32(addrhi);
+	} else {
+		u32 ctl;
+		u32 addr;
+		u32 addrext;
+
+		addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
+		addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
+			  >> BCM43xx_DMA32_ROUTING_SHIFT;
+		addr |= ring->routing;
+		ctl = (bufsize - ring->frameoffset)
+		      & BCM43xx_DMA32_DCTL_BYTECNT;
+		if (slot == ring->nr_slots - 1)
+			ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
+		if (start)
+			ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
+		if (end)
+			ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
+		if (irq)
+			ctl |= BCM43xx_DMA32_DCTL_IRQ;
+		ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
+		       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;
+
+		desc->dma32.control = cpu_to_le32(ctl);
+		desc->dma32.address = cpu_to_le32(addr);
+	}
+}
+
 static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
-			       struct bcm43xx_dmadesc *desc,
+			       struct bcm43xx_dmadesc_generic *desc,
 			       struct bcm43xx_dmadesc_meta *meta,
 			       gfp_t gfp_flags)
 {
 	struct bcm43xx_rxhdr *rxhdr;
+	struct bcm43xx_hwxmitstatus *xmitstat;
 	dma_addr_t dmaaddr;
-	u32 desc_addr;
-	u32 desc_ctl;
-	const int slot = (int)(desc - ring->vbase);
 	struct sk_buff *skb;
 
-	assert(slot >= 0 && slot < ring->nr_slots);
 	assert(!ring->tx);
 
 	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
 	if (unlikely(!skb))
 		return -ENOMEM;
 	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
-	if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
-		unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
-		dev_kfree_skb_any(skb);
-		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
-				    "(0x%llx, len: %u)\n",
-			(unsigned long long)dmaaddr, ring->rx_buffersize);
-		return -ENOMEM;
-	}
 	meta->skb = skb;
 	meta->dmaaddr = dmaaddr;
 	skb->dev = ring->bcm->net_dev;
-	desc_addr = (u32)(dmaaddr + ring->memoffset);
-	desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
-		    (u32)(ring->rx_buffersize - ring->frameoffset));
-	if (slot == ring->nr_slots - 1)
-		desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
-	set_desc_addr(desc, desc_addr);
-	set_desc_ctl(desc, desc_ctl);
+
+	fill_descriptor(ring, desc, dmaaddr,
+			ring->rx_buffersize, 0, 0, 0);
 
 	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
 	rxhdr->frame_length = 0;
 	rxhdr->flags1 = 0;
+	xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
+	xmitstat->cookie = 0;
 
 	return 0;
 }
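
To see what fill_descriptor() writes in the 32-bit case, here is a worked
example under assumed values (bus address 0x1A2B3C40, a 2048-byte buffer,
frameoffset 0, and the client-translation routing code; the masks are the
BCM43xx_DMA32_* definitions from bcm43xx_dma.h further below):

	dma_addr_t dmaaddr = 0x1A2B3C40;
	u32 addr, addrext, ctl;

	addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);	/* 0x1A2B3C40; bits 30-31 clear */
	addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
		  >> BCM43xx_DMA32_ROUTING_SHIFT;	/* 0 here */
	addr |= BCM43xx_DMA32_CLIENTTRANS;		/* 0x5A2B3C40 after routing */
	ctl = 2048 & BCM43xx_DMA32_DCTL_BYTECNT;	/* byte count in bits 0-12 */
	/* a single-fragment TX descriptor also ORs in FRAMESTART, FRAMEEND
	 * and IRQ; DTABLEEND is set only on the ring's last slot. */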
@@ -336,17 +429,17 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
 static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
 {
 	int i, err = -ENOMEM;
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 
 	for (i = 0; i < ring->nr_slots; i++) {
-		desc = ring->vbase + i;
-		meta = ring->meta + i;
+		desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
 		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
 		if (err)
 			goto err_unwind;
 	}
+	mb();
 	ring->used_slots = ring->nr_slots;
 	err = 0;
 out:
@@ -354,8 +447,7 @@ out:
 
 err_unwind:
 	for (i--; i >= 0; i--) {
-		desc = ring->vbase + i;
-		meta = ring->meta + i;
+		desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
 		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
 		dev_kfree_skb(meta->skb);
@@ -371,27 +463,67 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
 {
 	int err = 0;
 	u32 value;
+	u32 addrext;
 
 	if (ring->tx) {
-		/* Set Transmit Control register to "transmit enable" */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-				  BCM43xx_DMA_TXCTRL_ENABLE);
-		/* Set Transmit Descriptor ring address. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING,
-				  ring->dmabase + ring->memoffset);
+		if (ring->dma64) {
+			u64 ringbase = (u64)(ring->dmabase);
+
+			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+			value = BCM43xx_DMA64_TXENABLE;
+			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
+				 & BCM43xx_DMA64_TXADDREXT_MASK;
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
+					  (ringbase & 0xFFFFFFFF));
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
+					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
+					  | ring->routing);
+		} else {
+			u32 ringbase = (u32)(ring->dmabase);
+
+			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
+			value = BCM43xx_DMA32_TXENABLE;
+			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
+				 & BCM43xx_DMA32_TXADDREXT_MASK;
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
+					  (ringbase & ~BCM43xx_DMA32_ROUTING)
+					  | ring->routing);
+		}
 	} else {
 		err = alloc_initial_descbuffers(ring);
 		if (err)
 			goto out;
-		/* Set Receive Control "receive enable" and frame offset */
-		value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
-		value |= BCM43xx_DMA_RXCTRL_ENABLE;
-		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value);
-		/* Set Receive Descriptor ring address. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING,
-				  ring->dmabase + ring->memoffset);
-		/* Init the descriptor pointer. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200);
+		if (ring->dma64) {
+			u64 ringbase = (u64)(ring->dmabase);
+
+			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
+			value |= BCM43xx_DMA64_RXENABLE;
+			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
+				 & BCM43xx_DMA64_RXADDREXT_MASK;
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
+					  (ringbase & 0xFFFFFFFF));
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
+					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
+					  | ring->routing);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
+		} else {
+			u32 ringbase = (u32)(ring->dmabase);
+
+			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
+			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
+			value |= BCM43xx_DMA32_RXENABLE;
+			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
+				 & BCM43xx_DMA32_RXADDREXT_MASK;
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
+					  (ringbase & ~BCM43xx_DMA32_ROUTING)
+					  | ring->routing);
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
+		}
 	}
 
 out:
@@ -402,27 +534,32 @@ out:
 static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
 {
 	if (ring->tx) {
-		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
-		/* Zero out Transmit Descriptor ring address. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0);
+		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
+		if (ring->dma64) {
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
+		} else
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
 	} else {
-		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
-		/* Zero out Receive Descriptor ring address. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0);
+		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
+		if (ring->dma64) {
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
+		} else
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
 	}
 }
 
 static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
 {
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 	int i;
 
 	if (!ring->used_slots)
 		return;
 	for (i = 0; i < ring->nr_slots; i++) {
-		desc = ring->vbase + i;
-		meta = ring->meta + i;
+		desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
 		if (!meta->skb) {
 			assert(ring->tx);
@@ -430,62 +567,67 @@ static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
 		}
 		if (ring->tx) {
 			unmap_descbuffer(ring, meta->dmaaddr,
 					 meta->skb->len, 1);
 		} else {
 			unmap_descbuffer(ring, meta->dmaaddr,
 					 ring->rx_buffersize, 0);
 		}
-		free_descriptor_buffer(ring, desc, meta, 0);
+		free_descriptor_buffer(ring, meta, 0);
 	}
 }
 
 /* Main initialization function. */
 static
 struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
-					       u16 dma_controller_base,
-					       int nr_descriptor_slots,
-					       int tx)
+					       int controller_index,
+					       int for_tx,
+					       int dma64)
 {
 	struct bcm43xx_dmaring *ring;
 	int err;
+	int nr_slots;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
 		goto out;
 
-	ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
+	nr_slots = BCM43xx_RXRING_SLOTS;
+	if (for_tx)
+		nr_slots = BCM43xx_TXRING_SLOTS;
+
+	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
 			     GFP_KERNEL);
 	if (!ring->meta)
 		goto err_kfree_ring;
 
-	ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
+	ring->routing = BCM43xx_DMA32_CLIENTTRANS;
+	if (dma64)
+		ring->routing = BCM43xx_DMA64_CLIENTTRANS;
 #ifdef CONFIG_BCM947XX
 	if (bcm->pci_dev->bus->number == 0)
-		ring->memoffset = 0;
+		ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
 #endif
 
 	ring->bcm = bcm;
-	ring->nr_slots = nr_descriptor_slots;
+	ring->nr_slots = nr_slots;
 	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
 	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
 	assert(ring->suspend_mark < ring->resume_mark);
-	ring->mmio_base = dma_controller_base;
-	if (tx) {
+	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
+	ring->index = controller_index;
+	ring->dma64 = !!dma64;
+	if (for_tx) {
 		ring->tx = 1;
 		ring->current_slot = -1;
 	} else {
-		switch (dma_controller_base) {
-		case BCM43xx_MMIO_DMA1_BASE:
-			ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
-			ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
-			break;
-		case BCM43xx_MMIO_DMA4_BASE:
-			ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
-			ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
-			break;
-		default:
+		if (ring->index == 0) {
+			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
+			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
+		} else if (ring->index == 3) {
+			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
+			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
+		} else
 			assert(0);
-		}
 	}
 
 	err = alloc_ringmemory(ring);
@@ -514,7 +656,8 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
 	if (!ring)
 		return;
 
-	dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
+	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
+		(ring->dma64) ? "64" : "32",
 		ring->mmio_base,
 		(ring->tx) ? "TX" : "RX",
 		ring->max_used_slots, ring->nr_slots);
@@ -537,10 +680,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
 		return;
 	dma = bcm43xx_current_dma(bcm);
 
-	bcm43xx_destroy_dmaring(dma->rx_ring1);
-	dma->rx_ring1 = NULL;
+	bcm43xx_destroy_dmaring(dma->rx_ring3);
+	dma->rx_ring3 = NULL;
 	bcm43xx_destroy_dmaring(dma->rx_ring0);
 	dma->rx_ring0 = NULL;
+
+	bcm43xx_destroy_dmaring(dma->tx_ring5);
+	dma->tx_ring5 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring4);
+	dma->tx_ring4 = NULL;
 	bcm43xx_destroy_dmaring(dma->tx_ring3);
 	dma->tx_ring3 = NULL;
 	bcm43xx_destroy_dmaring(dma->tx_ring2);
@@ -556,48 +704,59 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
 	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
 	struct bcm43xx_dmaring *ring;
 	int err = -ENOMEM;
+	int dma64 = 0;
+	u32 sbtmstatehi;
+
+	sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
+	if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT)
+		dma64 = 1;
 
 	/* setup TX DMA channels. */
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
-				     BCM43xx_TXRING_SLOTS, 1);
+	ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
 	if (!ring)
 		goto out;
 	dma->tx_ring0 = ring;
 
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
-				     BCM43xx_TXRING_SLOTS, 1);
+	ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
 	if (!ring)
 		goto err_destroy_tx0;
 	dma->tx_ring1 = ring;
 
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
-				     BCM43xx_TXRING_SLOTS, 1);
+	ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
 	if (!ring)
 		goto err_destroy_tx1;
 	dma->tx_ring2 = ring;
 
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
-				     BCM43xx_TXRING_SLOTS, 1);
+	ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
 	if (!ring)
 		goto err_destroy_tx2;
 	dma->tx_ring3 = ring;
 
-	/* setup RX DMA channels. */
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
-				     BCM43xx_RXRING_SLOTS, 0);
+	ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
 	if (!ring)
 		goto err_destroy_tx3;
+	dma->tx_ring4 = ring;
+
+	ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
+	if (!ring)
+		goto err_destroy_tx4;
+	dma->tx_ring5 = ring;
+
+	/* setup RX DMA channels. */
+	ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
+	if (!ring)
+		goto err_destroy_tx5;
 	dma->rx_ring0 = ring;
 
 	if (bcm->current_core->rev < 5) {
-		ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
-					     BCM43xx_RXRING_SLOTS, 0);
+		ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
 		if (!ring)
 			goto err_destroy_rx0;
-		dma->rx_ring1 = ring;
+		dma->rx_ring3 = ring;
 	}
 
-	dprintk(KERN_INFO PFX "DMA initialized\n");
+	dprintk(KERN_INFO PFX "%s DMA initialized\n",
+		dma64 ? "64-bit" : "32-bit");
 	err = 0;
 out:
 	return err;
@@ -605,6 +764,12 @@ out:
 err_destroy_rx0:
 	bcm43xx_destroy_dmaring(dma->rx_ring0);
 	dma->rx_ring0 = NULL;
+err_destroy_tx5:
+	bcm43xx_destroy_dmaring(dma->tx_ring5);
+	dma->tx_ring5 = NULL;
+err_destroy_tx4:
+	bcm43xx_destroy_dmaring(dma->tx_ring4);
+	dma->tx_ring4 = NULL;
 err_destroy_tx3:
 	bcm43xx_destroy_dmaring(dma->tx_ring3);
 	dma->tx_ring3 = NULL;
@@ -624,7 +789,7 @@ err_destroy_tx0:
 static u16 generate_cookie(struct bcm43xx_dmaring *ring,
 			   int slot)
 {
-	u16 cookie = 0xF000;
+	u16 cookie = 0x1000;
 
 	/* Use the upper 4 bits of the cookie as
 	 * DMA controller ID and store the slot number
@@ -632,21 +797,25 @@ static u16 generate_cookie(struct bcm43xx_dmaring *ring,
 	 * Note that the cookie must never be 0, as this
 	 * is a special value used in RX path.
 	 */
-	switch (ring->mmio_base) {
-	default:
-		assert(0);
-	case BCM43xx_MMIO_DMA1_BASE:
+	switch (ring->index) {
+	case 0:
 		cookie = 0xA000;
 		break;
-	case BCM43xx_MMIO_DMA2_BASE:
+	case 1:
 		cookie = 0xB000;
 		break;
-	case BCM43xx_MMIO_DMA3_BASE:
+	case 2:
 		cookie = 0xC000;
 		break;
-	case BCM43xx_MMIO_DMA4_BASE:
+	case 3:
 		cookie = 0xD000;
 		break;
+	case 4:
+		cookie = 0xE000;
+		break;
+	case 5:
+		cookie = 0xF000;
+		break;
 	}
 	assert(((u16)slot & 0xF000) == 0x0000);
 	cookie |= (u16)slot;
@@ -675,6 +844,12 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
 	case 0xD000:
 		ring = dma->tx_ring3;
 		break;
+	case 0xE000:
+		ring = dma->tx_ring4;
+		break;
+	case 0xF000:
+		ring = dma->tx_ring5;
+		break;
 	default:
 		assert(0);
 	}
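
A sketch of how the cookie round-trips (values read off generate_cookie()
and parse_cookie() above; slots fit in the low 12 bits, which is what the
0xF000 assert enforces):

	u16 cookie = 0xE000 | 42;	/* TX controller 4, slot 42 */

	/* parse_cookie() recovers both halves:
	 *   cookie & 0xF000 == 0xE000  ->  ring = dma->tx_ring4
	 *   cookie & 0x0FFF == 42      ->  slot = 42
	 */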
@@ -687,6 +862,9 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
 static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
 				  int slot)
 {
+	u16 offset;
+	int descsize;
+
 	/* Everything is ready to start. Buffers are DMA mapped and
 	 * associated with slots.
 	 * "slot" is the last slot of the new frame we want to transmit.
@@ -694,25 +872,26 @@ static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
 	 */
 	wmb();
 	slot = next_slot(ring, slot);
-	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX,
-			  (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
+	offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
+	descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
+				 : sizeof(struct bcm43xx_dmadesc32);
+	bcm43xx_dma_write(ring, offset,
+			  (u32)(slot * descsize));
 }
 
-static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
-			   struct sk_buff *skb,
-			   u8 cur_frag)
+static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
+			    struct sk_buff *skb,
+			    u8 cur_frag)
 {
 	int slot;
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
-	u32 desc_ctl;
-	u32 desc_addr;
+	dma_addr_t dmaaddr;
 
 	assert(skb_shinfo(skb)->nr_frags == 0);
 
 	slot = request_slot(ring);
-	desc = ring->vbase + slot;
-	meta = ring->meta + slot;
+	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
 
 	/* Add a device specific TX header. */
 	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
729 generate_cookie(ring, slot)); 908 generate_cookie(ring, slot));
730 909
731 meta->skb = skb; 910 meta->skb = skb;
732 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 911 dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
733 if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) { 912 meta->dmaaddr = dmaaddr;
734 return_slot(ring, slot);
735 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
736 "(0x%llx, len: %u)\n",
737 (unsigned long long)meta->dmaaddr, skb->len);
738 return -ENOMEM;
739 }
740 913
741 desc_addr = (u32)(meta->dmaaddr + ring->memoffset); 914 fill_descriptor(ring, desc, dmaaddr,
742 desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND; 915 skb->len, 1, 1, 1);
743 desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
744 desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
745 (u32)(meta->skb->len - ring->frameoffset));
746 if (slot == ring->nr_slots - 1)
747 desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
748 916
749 set_desc_ctl(desc, desc_ctl);
750 set_desc_addr(desc, desc_addr);
751 /* Now transfer the whole frame. */ 917 /* Now transfer the whole frame. */
752 dmacontroller_poke_tx(ring, slot); 918 dmacontroller_poke_tx(ring, slot);
753
754 return 0;
755} 919}
756 920
757int bcm43xx_dma_tx(struct bcm43xx_private *bcm, 921int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
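
Note on the index registers: both engines address descriptors by byte
offset, so the slot number is scaled by the descriptor size, which differs
per engine. Given the struct layouts in bcm43xx_dma.h (dmadesc32 has two
__le32 members; dmadesc64, defined further down, is assumed to have four):

	/* sizeof(struct bcm43xx_dmadesc32) ==  8 */
	/* sizeof(struct bcm43xx_dmadesc64) == 16 (assuming four __le32 fields) */
	/* slot 10 is therefore poked as byte offset 80 or 160, respectively */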
@@ -781,7 +945,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
 		/* Take skb from ieee80211_txb_free */
 		txb->fragments[i] = NULL;
 		dma_tx_fragment(ring, skb, i);
-		//TODO: handle failure of dma_tx_fragment
 	}
 	ieee80211_txb_free(txb);
 
@@ -792,23 +955,28 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
 				   struct bcm43xx_xmitstatus *status)
 {
 	struct bcm43xx_dmaring *ring;
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 	int is_last_fragment;
 	int slot;
+	u32 tmp;
 
 	ring = parse_cookie(bcm, status->cookie, &slot);
 	assert(ring);
 	assert(ring->tx);
-	assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
 	while (1) {
 		assert(slot >= 0 && slot < ring->nr_slots);
-		desc = ring->vbase + slot;
-		meta = ring->meta + slot;
+		desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
 
-		is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
+		if (ring->dma64) {
+			tmp = le32_to_cpu(desc->dma64.control0);
+			is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
+		} else {
+			tmp = le32_to_cpu(desc->dma32.control);
+			is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
+		}
 		unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
-		free_descriptor_buffer(ring, desc, meta, 1);
+		free_descriptor_buffer(ring, meta, 1);
 		/* Everything belonging to the slot is unmapped
 		 * and freed, so we can return it.
 		 */
@@ -824,7 +992,7 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
 static void dma_rx(struct bcm43xx_dmaring *ring,
 		   int *slot)
 {
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 	struct bcm43xx_rxhdr *rxhdr;
 	struct sk_buff *skb;
@@ -832,13 +1000,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
 	int err;
 	dma_addr_t dmaaddr;
 
-	desc = ring->vbase + *slot;
-	meta = ring->meta + *slot;
+	desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
 
 	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
 	skb = meta->skb;
 
-	if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
+	if (ring->index == 3) {
 		/* We received an xmit status. */
 		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
 		struct bcm43xx_xmitstatus stat;
@@ -894,8 +1061,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
 		s32 tmp = len;
 
 		while (1) {
-			desc = ring->vbase + *slot;
-			meta = ring->meta + *slot;
+			desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
 			/* recycle the descriptor buffer. */
 			sync_descbuffer_for_device(ring, meta->dmaaddr,
 						   ring->rx_buffersize);
@@ -906,8 +1072,8 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
 				break;
 			}
-			printkl(KERN_ERR PFX "DMA RX buffer too small "
-					     "(len: %u, buffer: %u, nr-dropped: %d)\n",
-					     len, ring->rx_buffersize, cnt);
+			printkl(KERN_ERR PFX "DMA RX buffer too small "
+				"(len: %u, buffer: %u, nr-dropped: %d)\n",
+				len, ring->rx_buffersize, cnt);
 			goto drop;
 		}
 		len -= IEEE80211_FCS_LEN;
@@ -945,9 +1111,15 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 #endif
 
 	assert(!ring->tx);
-	status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS);
-	descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
-	current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
+	if (ring->dma64) {
+		status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
+		descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
+		current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
+	} else {
+		status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
+		descptr = (status & BCM43xx_DMA32_RXDPTR);
+		current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
+	}
 	assert(current_slot >= 0 && current_slot < ring->nr_slots);
 
 	slot = ring->current_slot;
@@ -958,8 +1130,13 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 		ring->max_used_slots = used_slots;
 #endif
 	}
-	bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX,
-			  (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
+	if (ring->dma64) {
+		bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
+				  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
+	} else {
+		bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
+				  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
+	}
 	ring->current_slot = slot;
 }
 
@@ -967,16 +1144,28 @@ void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
 {
 	assert(ring->tx);
 	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
-	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-			  bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
-			  | BCM43xx_DMA_TXCTRL_SUSPEND);
+	if (ring->dma64) {
+		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
+				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
+				  | BCM43xx_DMA64_TXSUSPEND);
+	} else {
+		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
+				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
+				  | BCM43xx_DMA32_TXSUSPEND);
+	}
 }
 
 void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
 {
 	assert(ring->tx);
-	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-			  bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
-			  & ~BCM43xx_DMA_TXCTRL_SUSPEND);
+	if (ring->dma64) {
+		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
+				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
+				  & ~BCM43xx_DMA64_TXSUSPEND);
+	} else {
+		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
+				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
+				  & ~BCM43xx_DMA32_TXSUSPEND);
+	}
 	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
 }
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
index b7d77638ba8c..e04bcaddd1d0 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
@@ -14,63 +14,179 @@
 #define BCM43xx_DMAIRQ_NONFATALMASK	(1 << 13)
 #define BCM43xx_DMAIRQ_RX_DONE		(1 << 16)
 
-/* DMA controller register offsets. (relative to BCM43xx_DMA#_BASE) */
-#define BCM43xx_DMA_TX_CONTROL		0x00
-#define BCM43xx_DMA_TX_DESC_RING	0x04
-#define BCM43xx_DMA_TX_DESC_INDEX	0x08
-#define BCM43xx_DMA_TX_STATUS		0x0c
-#define BCM43xx_DMA_RX_CONTROL		0x10
-#define BCM43xx_DMA_RX_DESC_RING	0x14
-#define BCM43xx_DMA_RX_DESC_INDEX	0x18
-#define BCM43xx_DMA_RX_STATUS		0x1c
-
-/* DMA controller channel control word values. */
-#define BCM43xx_DMA_TXCTRL_ENABLE		(1 << 0)
-#define BCM43xx_DMA_TXCTRL_SUSPEND		(1 << 1)
-#define BCM43xx_DMA_TXCTRL_LOOPBACK		(1 << 2)
-#define BCM43xx_DMA_TXCTRL_FLUSH		(1 << 4)
-#define BCM43xx_DMA_RXCTRL_ENABLE		(1 << 0)
-#define BCM43xx_DMA_RXCTRL_FRAMEOFF_MASK	0x000000fe
-#define BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT	1
-#define BCM43xx_DMA_RXCTRL_PIO			(1 << 8)
-/* DMA controller channel status word values. */
-#define BCM43xx_DMA_TXSTAT_DPTR_MASK		0x00000fff
-#define BCM43xx_DMA_TXSTAT_STAT_MASK		0x0000f000
-#define BCM43xx_DMA_TXSTAT_STAT_DISABLED	0x00000000
-#define BCM43xx_DMA_TXSTAT_STAT_ACTIVE		0x00001000
-#define BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT	0x00002000
-#define BCM43xx_DMA_TXSTAT_STAT_STOPPED		0x00003000
-#define BCM43xx_DMA_TXSTAT_STAT_SUSP		0x00004000
-#define BCM43xx_DMA_TXSTAT_ERROR_MASK		0x000f0000
-#define BCM43xx_DMA_TXSTAT_FLUSHED		(1 << 20)
-#define BCM43xx_DMA_RXSTAT_DPTR_MASK		0x00000fff
-#define BCM43xx_DMA_RXSTAT_STAT_MASK		0x0000f000
-#define BCM43xx_DMA_RXSTAT_STAT_DISABLED	0x00000000
-#define BCM43xx_DMA_RXSTAT_STAT_ACTIVE		0x00001000
-#define BCM43xx_DMA_RXSTAT_STAT_IDLEWAIT	0x00002000
-#define BCM43xx_DMA_RXSTAT_STAT_RESERVED	0x00003000
-#define BCM43xx_DMA_RXSTAT_STAT_ERRORS		0x00004000
-#define BCM43xx_DMA_RXSTAT_ERROR_MASK		0x000f0000
-
-/* DMA descriptor control field values. */
-#define BCM43xx_DMADTOR_BYTECNT_MASK		0x00001fff
-#define BCM43xx_DMADTOR_DTABLEEND		(1 << 28) /* End of descriptor table */
-#define BCM43xx_DMADTOR_COMPIRQ		(1 << 29) /* IRQ on completion request */
-#define BCM43xx_DMADTOR_FRAMEEND		(1 << 30)
-#define BCM43xx_DMADTOR_FRAMESTART		(1 << 31)
+
+/*** 32-bit DMA Engine. ***/
+
+/* 32-bit DMA controller registers. */
+#define BCM43xx_DMA32_TXCTL			0x00
+#define BCM43xx_DMA32_TXENABLE			0x00000001
+#define BCM43xx_DMA32_TXSUSPEND			0x00000002
+#define BCM43xx_DMA32_TXLOOPBACK		0x00000004
+#define BCM43xx_DMA32_TXFLUSH			0x00000010
+#define BCM43xx_DMA32_TXADDREXT_MASK		0x00030000
+#define BCM43xx_DMA32_TXADDREXT_SHIFT		16
+#define BCM43xx_DMA32_TXRING			0x04
+#define BCM43xx_DMA32_TXINDEX			0x08
+#define BCM43xx_DMA32_TXSTATUS			0x0C
+#define BCM43xx_DMA32_TXDPTR			0x00000FFF
+#define BCM43xx_DMA32_TXSTATE			0x0000F000
+#define BCM43xx_DMA32_TXSTAT_DISABLED		0x00000000
+#define BCM43xx_DMA32_TXSTAT_ACTIVE		0x00001000
+#define BCM43xx_DMA32_TXSTAT_IDLEWAIT		0x00002000
+#define BCM43xx_DMA32_TXSTAT_STOPPED		0x00003000
+#define BCM43xx_DMA32_TXSTAT_SUSP		0x00004000
+#define BCM43xx_DMA32_TXERROR			0x000F0000
+#define BCM43xx_DMA32_TXERR_NOERR		0x00000000
+#define BCM43xx_DMA32_TXERR_PROT		0x00010000
+#define BCM43xx_DMA32_TXERR_UNDERRUN		0x00020000
+#define BCM43xx_DMA32_TXERR_BUFREAD		0x00030000
+#define BCM43xx_DMA32_TXERR_DESCREAD		0x00040000
+#define BCM43xx_DMA32_TXACTIVE			0xFFF00000
+#define BCM43xx_DMA32_RXCTL			0x10
+#define BCM43xx_DMA32_RXENABLE			0x00000001
+#define BCM43xx_DMA32_RXFROFF_MASK		0x000000FE
+#define BCM43xx_DMA32_RXFROFF_SHIFT		1
+#define BCM43xx_DMA32_RXDIRECTFIFO		0x00000100
+#define BCM43xx_DMA32_RXADDREXT_MASK		0x00030000
+#define BCM43xx_DMA32_RXADDREXT_SHIFT		16
+#define BCM43xx_DMA32_RXRING			0x14
+#define BCM43xx_DMA32_RXINDEX			0x18
+#define BCM43xx_DMA32_RXSTATUS			0x1C
+#define BCM43xx_DMA32_RXDPTR			0x00000FFF
+#define BCM43xx_DMA32_RXSTATE			0x0000F000
+#define BCM43xx_DMA32_RXSTAT_DISABLED		0x00000000
+#define BCM43xx_DMA32_RXSTAT_ACTIVE		0x00001000
+#define BCM43xx_DMA32_RXSTAT_IDLEWAIT		0x00002000
+#define BCM43xx_DMA32_RXSTAT_STOPPED		0x00003000
+#define BCM43xx_DMA32_RXERROR			0x000F0000
+#define BCM43xx_DMA32_RXERR_NOERR		0x00000000
+#define BCM43xx_DMA32_RXERR_PROT		0x00010000
+#define BCM43xx_DMA32_RXERR_OVERFLOW		0x00020000
+#define BCM43xx_DMA32_RXERR_BUFWRITE		0x00030000
+#define BCM43xx_DMA32_RXERR_DESCREAD		0x00040000
+#define BCM43xx_DMA32_RXACTIVE			0xFFF00000
+
+/* 32-bit DMA descriptor. */
+struct bcm43xx_dmadesc32 {
+	__le32 control;
+	__le32 address;
+} __attribute__((__packed__));
+#define BCM43xx_DMA32_DCTL_BYTECNT		0x00001FFF
+#define BCM43xx_DMA32_DCTL_ADDREXT_MASK		0x00030000
+#define BCM43xx_DMA32_DCTL_ADDREXT_SHIFT	16
+#define BCM43xx_DMA32_DCTL_DTABLEEND		0x10000000
+#define BCM43xx_DMA32_DCTL_IRQ			0x20000000
+#define BCM43xx_DMA32_DCTL_FRAMEEND		0x40000000
+#define BCM43xx_DMA32_DCTL_FRAMESTART		0x80000000
+
+/* Address field Routing value. */
+#define BCM43xx_DMA32_ROUTING			0xC0000000
+#define BCM43xx_DMA32_ROUTING_SHIFT		30
+#define BCM43xx_DMA32_NOTRANS			0x00000000
+#define BCM43xx_DMA32_CLIENTTRANS		0x40000000
+
+
+
+/*** 64-bit DMA Engine. ***/
+
+/* 64-bit DMA controller registers. */
+#define BCM43xx_DMA64_TXCTL			0x00
+#define BCM43xx_DMA64_TXENABLE			0x00000001
+#define BCM43xx_DMA64_TXSUSPEND			0x00000002
+#define BCM43xx_DMA64_TXLOOPBACK		0x00000004
+#define BCM43xx_DMA64_TXFLUSH			0x00000010
+#define BCM43xx_DMA64_TXADDREXT_MASK		0x00030000
+#define BCM43xx_DMA64_TXADDREXT_SHIFT		16
+#define BCM43xx_DMA64_TXINDEX			0x04
+#define BCM43xx_DMA64_TXRINGLO			0x08
+#define BCM43xx_DMA64_TXRINGHI			0x0C
+#define BCM43xx_DMA64_TXSTATUS			0x10
+#define BCM43xx_DMA64_TXSTATDPTR		0x00001FFF
+#define BCM43xx_DMA64_TXSTAT			0xF0000000
106#define BCM43xx_DMA64_TXSTAT_DISABLED 0x00000000
107#define BCM43xx_DMA64_TXSTAT_ACTIVE 0x10000000
108#define BCM43xx_DMA64_TXSTAT_IDLEWAIT 0x20000000
109#define BCM43xx_DMA64_TXSTAT_STOPPED 0x30000000
110#define BCM43xx_DMA64_TXSTAT_SUSP 0x40000000
111#define BCM43xx_DMA64_TXERROR 0x14
112#define BCM43xx_DMA64_TXERRDPTR 0x0001FFFF
113#define BCM43xx_DMA64_TXERR 0xF0000000
114#define BCM43xx_DMA64_TXERR_NOERR 0x00000000
115#define BCM43xx_DMA64_TXERR_PROT 0x10000000
116#define BCM43xx_DMA64_TXERR_UNDERRUN 0x20000000
117#define BCM43xx_DMA64_TXERR_TRANSFER 0x30000000
118#define BCM43xx_DMA64_TXERR_DESCREAD 0x40000000
119#define BCM43xx_DMA64_TXERR_CORE 0x50000000
120#define BCM43xx_DMA64_RXCTL 0x20
121#define BCM43xx_DMA64_RXENABLE 0x00000001
122#define BCM43xx_DMA64_RXFROFF_MASK 0x000000FE
123#define BCM43xx_DMA64_RXFROFF_SHIFT 1
124#define BCM43xx_DMA64_RXDIRECTFIFO 0x00000100
125#define BCM43xx_DMA64_RXADDREXT_MASK 0x00030000
126#define BCM43xx_DMA64_RXADDREXT_SHIFT 16
127#define BCM43xx_DMA64_RXINDEX 0x24
128#define BCM43xx_DMA64_RXRINGLO 0x28
129#define BCM43xx_DMA64_RXRINGHI 0x2C
130#define BCM43xx_DMA64_RXSTATUS 0x30
131#define BCM43xx_DMA64_RXSTATDPTR 0x00001FFF
132#define BCM43xx_DMA64_RXSTAT 0xF0000000
133#define BCM43xx_DMA64_RXSTAT_DISABLED 0x00000000
134#define BCM43xx_DMA64_RXSTAT_ACTIVE 0x10000000
135#define BCM43xx_DMA64_RXSTAT_IDLEWAIT 0x20000000
136#define BCM43xx_DMA64_RXSTAT_STOPPED 0x30000000
137#define BCM43xx_DMA64_RXSTAT_SUSP 0x40000000
138#define BCM43xx_DMA64_RXERROR 0x34
139#define BCM43xx_DMA64_RXERRDPTR 0x0001FFFF
140#define BCM43xx_DMA64_RXERR 0xF0000000
141#define BCM43xx_DMA64_RXERR_NOERR 0x00000000
142#define BCM43xx_DMA64_RXERR_PROT 0x10000000
143#define BCM43xx_DMA64_RXERR_UNDERRUN 0x20000000
144#define BCM43xx_DMA64_RXERR_TRANSFER 0x30000000
145#define BCM43xx_DMA64_RXERR_DESCREAD 0x40000000
146#define BCM43xx_DMA64_RXERR_CORE 0x50000000
147
148/* 64-bit DMA descriptor. */
149struct bcm43xx_dmadesc64 {
150 __le32 control0;
151 __le32 control1;
152 __le32 address_low;
153 __le32 address_high;
154} __attribute__((__packed__));
155#define BCM43xx_DMA64_DCTL0_DTABLEEND 0x10000000
156#define BCM43xx_DMA64_DCTL0_IRQ 0x20000000
157#define BCM43xx_DMA64_DCTL0_FRAMEEND 0x40000000
158#define BCM43xx_DMA64_DCTL0_FRAMESTART 0x80000000
159#define BCM43xx_DMA64_DCTL1_BYTECNT 0x00001FFF
160#define BCM43xx_DMA64_DCTL1_ADDREXT_MASK 0x00030000
161#define BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT 16
162
163/* Address field Routing value. */
164#define BCM43xx_DMA64_ROUTING 0xC0000000
165#define BCM43xx_DMA64_ROUTING_SHIFT 30
166#define BCM43xx_DMA64_NOTRANS 0x00000000
167#define BCM43xx_DMA64_CLIENTTRANS 0x80000000
168
169
170
171struct bcm43xx_dmadesc_generic {
172 union {
173 struct bcm43xx_dmadesc32 dma32;
174 struct bcm43xx_dmadesc64 dma64;
175 } __attribute__((__packed__));
176} __attribute__((__packed__));
177
61 178
62/* Misc DMA constants */ 179/* Misc DMA constants */
63#define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE 180#define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE
64#define BCM43xx_DMA_BUSADDRMAX 0x3FFFFFFF 181#define BCM43xx_DMA0_RX_FRAMEOFFSET 30
65#define BCM43xx_DMA_DMABUSADDROFFSET (1 << 30) 182#define BCM43xx_DMA3_RX_FRAMEOFFSET 0
66#define BCM43xx_DMA1_RX_FRAMEOFFSET 30 183
67#define BCM43xx_DMA4_RX_FRAMEOFFSET 0
68 184
69/* DMA engine tuning knobs */ 185/* DMA engine tuning knobs */
70#define BCM43xx_TXRING_SLOTS 512 186#define BCM43xx_TXRING_SLOTS 512
71#define BCM43xx_RXRING_SLOTS 64 187#define BCM43xx_RXRING_SLOTS 64
72#define BCM43xx_DMA1_RXBUFFERSIZE (2304 + 100) 188#define BCM43xx_DMA0_RX_BUFFERSIZE (2304 + 100)
73#define BCM43xx_DMA4_RXBUFFERSIZE 16 189#define BCM43xx_DMA3_RX_BUFFERSIZE 16
74/* Suspend the tx queue, if less than this percent slots are free. */ 190/* Suspend the tx queue, if less than this percent slots are free. */
75#define BCM43xx_TXSUSPEND_PERCENT 20 191#define BCM43xx_TXSUSPEND_PERCENT 20
76/* Resume the tx queue, if more than this percent slots are free. */ 192/* Resume the tx queue, if more than this percent slots are free. */
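
[Editor's note] The rewritten header folds each register's bit fields in directly beneath its offset, so a status word decodes with one mask per field: bits 0-11 of the 32-bit TX status hold the descriptor pointer, bits 12-15 the engine state. A small sketch of that decoding using the TXSTATUS masks defined above (the readback value is invented):

#include <stdint.h>
#include <stdio.h>

#define DMA32_TXDPTR            0x00000FFFu
#define DMA32_TXSTATE           0x0000F000u
#define DMA32_TXSTAT_DISABLED   0x00000000u
#define DMA32_TXSTAT_ACTIVE     0x00001000u
#define DMA32_TXSTAT_IDLEWAIT   0x00002000u
#define DMA32_TXSTAT_STOPPED    0x00003000u
#define DMA32_TXSTAT_SUSP       0x00004000u

static const char *txstate_name(uint32_t status)
{
	switch (status & DMA32_TXSTATE) {
	case DMA32_TXSTAT_DISABLED: return "disabled";
	case DMA32_TXSTAT_ACTIVE:   return "active";
	case DMA32_TXSTAT_IDLEWAIT: return "idle-wait";
	case DMA32_TXSTAT_STOPPED:  return "stopped";
	case DMA32_TXSTAT_SUSP:     return "suspended";
	}
	return "unknown";
}

int main(void)
{
	uint32_t status = 0x00001042;	/* hypothetical TXSTATUS readback */

	printf("state=%s dptr=0x%03x\n",
	       txstate_name(status), status & DMA32_TXDPTR);
	return 0;
}
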
@@ -86,17 +202,6 @@ struct bcm43xx_private;
86struct bcm43xx_xmitstatus; 202struct bcm43xx_xmitstatus;
87 203
88 204
89struct bcm43xx_dmadesc {
90 __le32 _control;
91 __le32 _address;
92} __attribute__((__packed__));
93
94/* Macros to access the bcm43xx_dmadesc struct */
95#define get_desc_ctl(desc) le32_to_cpu((desc)->_control)
96#define set_desc_ctl(desc, ctl) do { (desc)->_control = cpu_to_le32(ctl); } while (0)
97#define get_desc_addr(desc) le32_to_cpu((desc)->_address)
98#define set_desc_addr(desc, addr) do { (desc)->_address = cpu_to_le32(addr); } while (0)
99
100struct bcm43xx_dmadesc_meta { 205struct bcm43xx_dmadesc_meta {
101 /* The kernel DMA-able buffer. */ 206 /* The kernel DMA-able buffer. */
102 struct sk_buff *skb; 207 struct sk_buff *skb;
@@ -105,15 +210,14 @@ struct bcm43xx_dmadesc_meta {
105}; 210};
106 211
107struct bcm43xx_dmaring { 212struct bcm43xx_dmaring {
108 struct bcm43xx_private *bcm;
109 /* Kernel virtual base address of the ring memory. */ 213 /* Kernel virtual base address of the ring memory. */
110 struct bcm43xx_dmadesc *vbase; 214 void *descbase;
111 /* DMA memory offset */
112 dma_addr_t memoffset;
113 /* (Unadjusted) DMA base bus-address of the ring memory. */
114 dma_addr_t dmabase;
115 /* Meta data about all descriptors. */ 215 /* Meta data about all descriptors. */
116 struct bcm43xx_dmadesc_meta *meta; 216 struct bcm43xx_dmadesc_meta *meta;
217 /* DMA Routing value. */
218 u32 routing;
219 /* (Unadjusted) DMA base bus-address of the ring memory. */
220 dma_addr_t dmabase;
117 /* Number of descriptor slots in the ring. */ 221 /* Number of descriptor slots in the ring. */
118 int nr_slots; 222 int nr_slots;
119 /* Number of used descriptor slots. */ 223 /* Number of used descriptor slots. */
@@ -127,12 +231,17 @@ struct bcm43xx_dmaring {
127 u32 frameoffset; 231 u32 frameoffset;
128 /* Descriptor buffer size. */ 232 /* Descriptor buffer size. */
129 u16 rx_buffersize; 233 u16 rx_buffersize;
130 /* The MMIO base register of the DMA controller, this 234 /* The MMIO base register of the DMA controller. */
131 * ring is posted to.
132 */
133 u16 mmio_base; 235 u16 mmio_base;
134 u8 tx:1, /* TRUE, if this is a TX ring. */ 236 /* DMA controller index number (0-5). */
135 suspended:1; /* TRUE, if transfers are suspended on this ring. */ 237 int index;
238 /* Boolean. Is this a TX ring? */
239 u8 tx;
240 /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
241 u8 dma64;
242 /* Boolean. Are transfers suspended on this ring? */
243 u8 suspended;
244 struct bcm43xx_private *bcm;
136#ifdef CONFIG_BCM43XX_DEBUG 245#ifdef CONFIG_BCM43XX_DEBUG
137 /* Maximum number of used slots. */ 246 /* Maximum number of used slots. */
138 int max_used_slots; 247 int max_used_slots;
@@ -141,6 +250,34 @@ struct bcm43xx_dmaring {
141 250
142 251
143static inline 252static inline
253int bcm43xx_dma_desc2idx(struct bcm43xx_dmaring *ring,
254 struct bcm43xx_dmadesc_generic *desc)
255{
256 if (ring->dma64) {
257 struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
258 return (int)(&(desc->dma64) - dd64);
259 } else {
260 struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
261 return (int)(&(desc->dma32) - dd32);
262 }
263}
264
265static inline
266struct bcm43xx_dmadesc_generic * bcm43xx_dma_idx2desc(struct bcm43xx_dmaring *ring,
267 int slot,
268 struct bcm43xx_dmadesc_meta **meta)
269{
270 *meta = &(ring->meta[slot]);
271 if (ring->dma64) {
272 struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
273 return (struct bcm43xx_dmadesc_generic *)(&(dd64[slot]));
274 } else {
275 struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
276 return (struct bcm43xx_dmadesc_generic *)(&(dd32[slot]));
277 }
278}
279
280static inline
144u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring, 281u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring,
145 u16 offset) 282 u16 offset)
146{ 283{
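
[Editor's note] The new desc2idx/idx2desc helpers above recover a slot number from a descriptor pointer (and back) by doing pointer arithmetic on the concrete descriptor type: a 64-bit descriptor is 16 bytes, a 32-bit one 8, so the subtraction must be scaled by the right element size. A user-space check of why the cast through the concrete type matters:

#include <assert.h>
#include <stdint.h>

struct dmadesc32 { uint32_t control, address; };
struct dmadesc64 { uint32_t control0, control1, address_low, address_high; };

int main(void)
{
	struct dmadesc32 ring32[8];
	struct dmadesc64 ring64[8];

	/* Typed pointer subtraction yields the slot index for either layout... */
	assert(&ring32[5] - ring32 == 5);
	assert(&ring64[5] - ring64 == 5);

	/* ...because the compiler divides by the element size. The raw
	 * byte offsets differ by a factor of two: */
	assert((uintptr_t)&ring32[5] - (uintptr_t)ring32 ==
	       5 * sizeof(struct dmadesc32));
	assert((uintptr_t)&ring64[5] - (uintptr_t)ring64 ==
	       5 * sizeof(struct dmadesc64));
	return 0;
}
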
@@ -159,9 +296,13 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm);
159void bcm43xx_dma_free(struct bcm43xx_private *bcm); 296void bcm43xx_dma_free(struct bcm43xx_private *bcm);
160 297
161int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, 298int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
162 u16 dmacontroller_mmio_base); 299 u16 dmacontroller_mmio_base,
300 int dma64);
163int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, 301int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
164 u16 dmacontroller_mmio_base); 302 u16 dmacontroller_mmio_base,
303 int dma64);
304
305u16 bcm43xx_dmacontroller_base(int dma64bit, int dmacontroller_idx);
165 306
166void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring); 307void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring);
167void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring); 308void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring);
@@ -173,7 +314,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
173 struct ieee80211_txb *txb); 314 struct ieee80211_txb *txb);
174void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring); 315void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);
175 316
176
177#else /* CONFIG_BCM43XX_DMA */ 317#else /* CONFIG_BCM43XX_DMA */
178 318
179 319
@@ -188,13 +328,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
188} 328}
189static inline 329static inline
190int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, 330int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
191 u16 dmacontroller_mmio_base) 331 u16 dmacontroller_mmio_base,
332 int dma64)
192{ 333{
193 return 0; 334 return 0;
194} 335}
195static inline 336static inline
196int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, 337int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
197 u16 dmacontroller_mmio_base) 338 u16 dmacontroller_mmio_base,
339 int dma64)
198{ 340{
199 return 0; 341 return 0;
200} 342}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
index ec80692d638a..c3f90c8563d9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
@@ -51,12 +51,12 @@ static void bcm43xx_led_blink(unsigned long d)
51 struct bcm43xx_private *bcm = led->bcm; 51 struct bcm43xx_private *bcm = led->bcm;
52 unsigned long flags; 52 unsigned long flags;
53 53
54 bcm43xx_lock_irqonly(bcm, flags); 54 spin_lock_irqsave(&bcm->leds_lock, flags);
55 if (led->blink_interval) { 55 if (led->blink_interval) {
56 bcm43xx_led_changestate(led); 56 bcm43xx_led_changestate(led);
57 mod_timer(&led->blink_timer, jiffies + led->blink_interval); 57 mod_timer(&led->blink_timer, jiffies + led->blink_interval);
58 } 58 }
59 bcm43xx_unlock_irqonly(bcm, flags); 59 spin_unlock_irqrestore(&bcm->leds_lock, flags);
60} 60}
61 61
62static void bcm43xx_led_blink_start(struct bcm43xx_led *led, 62static void bcm43xx_led_blink_start(struct bcm43xx_led *led,
@@ -177,7 +177,9 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
177 int i, turn_on; 177 int i, turn_on;
178 unsigned long interval = 0; 178 unsigned long interval = 0;
179 u16 ledctl; 179 u16 ledctl;
180 unsigned long flags;
180 181
182 spin_lock_irqsave(&bcm->leds_lock, flags);
181 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL); 183 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
182 for (i = 0; i < BCM43xx_NR_LEDS; i++) { 184 for (i = 0; i < BCM43xx_NR_LEDS; i++) {
183 led = &(bcm->leds[i]); 185 led = &(bcm->leds[i]);
@@ -266,6 +268,7 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
266 ledctl &= ~(1 << i); 268 ledctl &= ~(1 << i);
267 } 269 }
268 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl); 270 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
271 spin_unlock_irqrestore(&bcm->leds_lock, flags);
269} 272}
270 273
271void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on) 274void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
@@ -274,7 +277,9 @@ void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
274 u16 ledctl; 277 u16 ledctl;
275 int i; 278 int i;
276 int bit_on; 279 int bit_on;
280 unsigned long flags;
277 281
282 spin_lock_irqsave(&bcm->leds_lock, flags);
278 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL); 283 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
279 for (i = 0; i < BCM43xx_NR_LEDS; i++) { 284 for (i = 0; i < BCM43xx_NR_LEDS; i++) {
280 led = &(bcm->leds[i]); 285 led = &(bcm->leds[i]);
@@ -290,4 +295,5 @@ void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
290 ledctl &= ~(1 << i); 295 ledctl &= ~(1 << i);
291 } 296 }
292 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl); 297 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
298 spin_unlock_irqrestore(&bcm->leds_lock, flags);
293} 299}
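
[Editor's note] All three bcm43xx_leds.c hunks make the same change: the driver-wide irqonly lock helpers are replaced by a dedicated leds_lock held across each read-modify-write of the GPIO LED control word, so blink timers and LED updates serialize without the big lock. A user-space model of that invariant (a pthread mutex stands in for spin_lock_irqsave on bcm->leds_lock):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t leds_lock = PTHREAD_MUTEX_INITIALIZER;
static uint16_t ledctl;	/* stands in for the GPIO control register */

static void led_set(int i, int on)
{
	/* The whole read-modify-write is one critical section, so a
	 * concurrent blink timer cannot clobber the bit we just set. */
	pthread_mutex_lock(&leds_lock);
	if (on)
		ledctl |= 1u << i;
	else
		ledctl &= ~(1u << i);
	pthread_mutex_unlock(&leds_lock);
}

int main(void)
{
	led_set(0, 1);
	led_set(2, 1);
	led_set(0, 0);
	printf("ledctl=0x%04x\n", ledctl);	/* prints 0x0004 */
	return 0;
}
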
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 3889f79e7128..cb9a3ae8463a 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -509,23 +509,20 @@ static void bcm43xx_synchronize_irq(struct bcm43xx_private *bcm)
509} 509}
510 510
511/* Make sure we don't receive more data from the device. */ 511/* Make sure we don't receive more data from the device. */
512static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm, u32 *oldstate) 512static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm)
513{ 513{
514 unsigned long flags; 514 unsigned long flags;
515 u32 old;
516 515
517 bcm43xx_lock_irqonly(bcm, flags); 516 spin_lock_irqsave(&bcm->irq_lock, flags);
518 if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) { 517 if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) {
519 bcm43xx_unlock_irqonly(bcm, flags); 518 spin_unlock_irqrestore(&bcm->irq_lock, flags);
520 return -EBUSY; 519 return -EBUSY;
521 } 520 }
522 old = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); 521 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
523 bcm43xx_unlock_irqonly(bcm, flags); 522 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_MASK); /* flush */
523 spin_unlock_irqrestore(&bcm->irq_lock, flags);
524 bcm43xx_synchronize_irq(bcm); 524 bcm43xx_synchronize_irq(bcm);
525 525
526 if (oldstate)
527 *oldstate = old;
528
529 return 0; 526 return 0;
530} 527}
531 528
@@ -537,7 +534,6 @@ static int bcm43xx_read_radioinfo(struct bcm43xx_private *bcm)
537 u16 manufact; 534 u16 manufact;
538 u16 version; 535 u16 version;
539 u8 revision; 536 u8 revision;
540 s8 i;
541 537
542 if (bcm->chip_id == 0x4317) { 538 if (bcm->chip_id == 0x4317) {
543 if (bcm->chip_rev == 0x00) 539 if (bcm->chip_rev == 0x00)
@@ -580,20 +576,11 @@ static int bcm43xx_read_radioinfo(struct bcm43xx_private *bcm)
580 radio->version = version; 576 radio->version = version;
581 radio->revision = revision; 577 radio->revision = revision;
582 578
583 /* Set default attenuation values. */
584 radio->baseband_atten = bcm43xx_default_baseband_attenuation(bcm);
585 radio->radio_atten = bcm43xx_default_radio_attenuation(bcm);
586 radio->txctl1 = bcm43xx_default_txctl1(bcm);
587 radio->txctl2 = 0xFFFF;
588 if (phy->type == BCM43xx_PHYTYPE_A) 579 if (phy->type == BCM43xx_PHYTYPE_A)
589 radio->txpower_desired = bcm->sprom.maxpower_aphy; 580 radio->txpower_desired = bcm->sprom.maxpower_aphy;
590 else 581 else
591 radio->txpower_desired = bcm->sprom.maxpower_bgphy; 582 radio->txpower_desired = bcm->sprom.maxpower_bgphy;
592 583
593 /* Initialize the in-memory nrssi Lookup Table. */
594 for (i = 0; i < 64; i++)
595 radio->nrssi_lt[i] = i;
596
597 return 0; 584 return 0;
598 585
599err_unsupported_radio: 586err_unsupported_radio:
@@ -1250,10 +1237,6 @@ int bcm43xx_switch_core(struct bcm43xx_private *bcm, struct bcm43xx_coreinfo *ne
1250 goto out; 1237 goto out;
1251 1238
1252 bcm->current_core = new_core; 1239 bcm->current_core = new_core;
1253 bcm->current_80211_core_idx = -1;
1254 if (new_core->id == BCM43xx_COREID_80211)
1255 bcm->current_80211_core_idx = (int)(new_core - &(bcm->core_80211[0]));
1256
1257out: 1240out:
1258 return err; 1241 return err;
1259} 1242}
@@ -1389,6 +1372,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
1389 if ((bcm43xx_core_enabled(bcm)) && 1372 if ((bcm43xx_core_enabled(bcm)) &&
1390 !bcm43xx_using_pio(bcm)) { 1373 !bcm43xx_using_pio(bcm)) {
1391//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here? 1374//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here?
1375#if 0
1392#ifndef CONFIG_BCM947XX 1376#ifndef CONFIG_BCM947XX
1393 /* reset all used DMA controllers. */ 1377 /* reset all used DMA controllers. */
1394 bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE); 1378 bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
@@ -1399,6 +1383,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
1399 if (bcm->current_core->rev < 5) 1383 if (bcm->current_core->rev < 5)
1400 bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE); 1384 bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
1401#endif 1385#endif
1386#endif
1402 } 1387 }
1403 if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) { 1388 if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
1404 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 1389 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
@@ -1423,43 +1408,23 @@ static void bcm43xx_wireless_core_disable(struct bcm43xx_private *bcm)
1423 bcm43xx_core_disable(bcm, 0); 1408 bcm43xx_core_disable(bcm, 0);
1424} 1409}
1425 1410
1426/* Mark the current 80211 core inactive. 1411/* Mark the current 80211 core inactive. */
1427 * "active_80211_core" is the other 80211 core, which is used. 1412static void bcm43xx_wireless_core_mark_inactive(struct bcm43xx_private *bcm)
1428 */
1429static int bcm43xx_wireless_core_mark_inactive(struct bcm43xx_private *bcm,
1430 struct bcm43xx_coreinfo *active_80211_core)
1431{ 1413{
1432 u32 sbtmstatelow; 1414 u32 sbtmstatelow;
1433 struct bcm43xx_coreinfo *old_core;
1434 int err = 0;
1435 1415
1436 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); 1416 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
1437 bcm43xx_radio_turn_off(bcm); 1417 bcm43xx_radio_turn_off(bcm);
1438 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW); 1418 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
1439 sbtmstatelow &= ~0x200a0000; 1419 sbtmstatelow &= 0xDFF5FFFF;
1440 sbtmstatelow |= 0xa0000; 1420 sbtmstatelow |= 0x000A0000;
1441 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow); 1421 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1442 udelay(1); 1422 udelay(1);
1443 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW); 1423 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
1444 sbtmstatelow &= ~0xa0000; 1424 sbtmstatelow &= 0xFFF5FFFF;
1445 sbtmstatelow |= 0x80000; 1425 sbtmstatelow |= 0x00080000;
1446 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow); 1426 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1447 udelay(1); 1427 udelay(1);
1448
1449 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_G) {
1450 old_core = bcm->current_core;
1451 err = bcm43xx_switch_core(bcm, active_80211_core);
1452 if (err)
1453 goto out;
1454 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
1455 sbtmstatelow &= ~0x20000000;
1456 sbtmstatelow |= 0x20000000;
1457 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1458 err = bcm43xx_switch_core(bcm, old_core);
1459 }
1460
1461out:
1462 return err;
1463} 1428}
1464 1429
1465static void handle_irq_transmit_status(struct bcm43xx_private *bcm) 1430static void handle_irq_transmit_status(struct bcm43xx_private *bcm)
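
[Editor's note] In the mark_inactive hunk above, "&= ~0x200a0000" becomes "&= 0xDFF5FFFF" and "&= ~0xa0000" becomes "&= 0xFFF5FFFF". The new literals are exactly the complements of the old masks, so the register operation is unchanged; the substantive change is dropping the G-PHY detour through the companion 802.11 core (and with it the error return). A two-line check of the mask equivalence:

#include <assert.h>

int main(void)
{
	assert(~0x200A0000u == 0xDFF5FFFFu);	/* old clear-mask, spelled out */
	assert(~0x000A0000u == 0xFFF5FFFFu);
	return 0;
}
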
@@ -1581,17 +1546,7 @@ static void handle_irq_noise(struct bcm43xx_private *bcm)
1581 else 1546 else
1582 average -= 48; 1547 average -= 48;
1583 1548
1584/* FIXME: This is wrong, but people want fancy stats. well... */ 1549 bcm->stats.noise = average;
1585bcm->stats.noise = average;
1586 if (average > -65)
1587 bcm->stats.link_quality = 0;
1588 else if (average > -75)
1589 bcm->stats.link_quality = 1;
1590 else if (average > -85)
1591 bcm->stats.link_quality = 2;
1592 else
1593 bcm->stats.link_quality = 3;
1594// dprintk(KERN_INFO PFX "Link Quality: %u (avg was %d)\n", bcm->stats.link_quality, average);
1595drop_calculation: 1550drop_calculation:
1596 bcm->noisecalc.calculation_running = 0; 1551 bcm->noisecalc.calculation_running = 0;
1597 return; 1552 return;
@@ -1709,8 +1664,9 @@ static void handle_irq_beacon(struct bcm43xx_private *bcm)
1709static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) 1664static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1710{ 1665{
1711 u32 reason; 1666 u32 reason;
1712 u32 dma_reason[4]; 1667 u32 dma_reason[6];
1713 int activity = 0; 1668 u32 merged_dma_reason = 0;
1669 int i, activity = 0;
1714 unsigned long flags; 1670 unsigned long flags;
1715 1671
1716#ifdef CONFIG_BCM43XX_DEBUG 1672#ifdef CONFIG_BCM43XX_DEBUG
@@ -1720,12 +1676,12 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1720# define bcmirq_handled(irq) do { /* nothing */ } while (0) 1676# define bcmirq_handled(irq) do { /* nothing */ } while (0)
1721#endif /* CONFIG_BCM43XX_DEBUG*/ 1677#endif /* CONFIG_BCM43XX_DEBUG*/
1722 1678
1723 bcm43xx_lock_irqonly(bcm, flags); 1679 spin_lock_irqsave(&bcm->irq_lock, flags);
1724 reason = bcm->irq_reason; 1680 reason = bcm->irq_reason;
1725 dma_reason[0] = bcm->dma_reason[0]; 1681 for (i = 5; i >= 0; i--) {
1726 dma_reason[1] = bcm->dma_reason[1]; 1682 dma_reason[i] = bcm->dma_reason[i];
1727 dma_reason[2] = bcm->dma_reason[2]; 1683 merged_dma_reason |= dma_reason[i];
1728 dma_reason[3] = bcm->dma_reason[3]; 1684 }
1729 1685
1730 if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) { 1686 if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) {
1731 /* TX error. We get this when Template Ram is written in wrong endianness 1687 /* TX error. We get this when Template Ram is written in wrong endianness
@@ -1736,27 +1692,25 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1736 printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n"); 1692 printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n");
1737 bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR); 1693 bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR);
1738 } 1694 }
1739 if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_FATALMASK) | 1695 if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_FATALMASK)) {
1740 (dma_reason[1] & BCM43xx_DMAIRQ_FATALMASK) |
1741 (dma_reason[2] & BCM43xx_DMAIRQ_FATALMASK) |
1742 (dma_reason[3] & BCM43xx_DMAIRQ_FATALMASK))) {
1743 printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: " 1696 printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: "
1744 "0x%08X, 0x%08X, 0x%08X, 0x%08X\n", 1697 "0x%08X, 0x%08X, 0x%08X, "
1698 "0x%08X, 0x%08X, 0x%08X\n",
1745 dma_reason[0], dma_reason[1], 1699 dma_reason[0], dma_reason[1],
1746 dma_reason[2], dma_reason[3]); 1700 dma_reason[2], dma_reason[3],
1701 dma_reason[4], dma_reason[5]);
1747 bcm43xx_controller_restart(bcm, "DMA error"); 1702 bcm43xx_controller_restart(bcm, "DMA error");
1748 mmiowb(); 1703 mmiowb();
1749 bcm43xx_unlock_irqonly(bcm, flags); 1704 spin_unlock_irqrestore(&bcm->irq_lock, flags);
1750 return; 1705 return;
1751 } 1706 }
1752 if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) | 1707 if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_NONFATALMASK)) {
1753 (dma_reason[1] & BCM43xx_DMAIRQ_NONFATALMASK) |
1754 (dma_reason[2] & BCM43xx_DMAIRQ_NONFATALMASK) |
1755 (dma_reason[3] & BCM43xx_DMAIRQ_NONFATALMASK))) {
1756 printkl(KERN_ERR PFX "DMA error: " 1708 printkl(KERN_ERR PFX "DMA error: "
1757 "0x%08X, 0x%08X, 0x%08X, 0x%08X\n", 1709 "0x%08X, 0x%08X, 0x%08X, "
1710 "0x%08X, 0x%08X, 0x%08X\n",
1758 dma_reason[0], dma_reason[1], 1711 dma_reason[0], dma_reason[1],
1759 dma_reason[2], dma_reason[3]); 1712 dma_reason[2], dma_reason[3],
1713 dma_reason[4], dma_reason[5]);
1760 } 1714 }
1761 1715
1762 if (reason & BCM43xx_IRQ_PS) { 1716 if (reason & BCM43xx_IRQ_PS) {
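
[Editor's note] The tasklet now copies all six per-engine reason words in a loop and ORs them into merged_dma_reason, so the fatal and non-fatal group tests run once instead of once per engine. The idea in isolation (bit 13 is BCM43xx_DMAIRQ_NONFATALMASK per bcm43xx_dma.h above; the fatal mask below is a placeholder, the real bits live in that header):

#include <stdint.h>
#include <stdio.h>

#define DMAIRQ_NONFATALMASK (1u << 13)
/* Placeholder fatal bits for the sketch only, not the driver value: */
#define DMAIRQ_FATALMASK    ((1u << 10) | (1u << 11) | (1u << 12))

int main(void)
{
	uint32_t dma_reason[6] = { 0, 0, DMAIRQ_NONFATALMASK, 0, 0, 0 };
	uint32_t merged = 0;
	int i;

	for (i = 5; i >= 0; i--)	/* same loop shape as the hunk */
		merged |= dma_reason[i];

	if (merged & DMAIRQ_FATALMASK)
		puts("fatal DMA error: restart the controller");
	else if (merged & DMAIRQ_NONFATALMASK)
		puts("non-fatal DMA error: log all six reason words");
	return 0;
}
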
@@ -1791,8 +1745,6 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1791 } 1745 }
1792 1746
1793 /* Check the DMA reason registers for received data. */ 1747 /* Check the DMA reason registers for received data. */
1794 assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
1795 assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
1796 if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) { 1748 if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) {
1797 if (bcm43xx_using_pio(bcm)) 1749 if (bcm43xx_using_pio(bcm))
1798 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0); 1750 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0);
@@ -1800,13 +1752,17 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1800 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0); 1752 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0);
1801 /* We intentionally don't set "activity" to 1, here. */ 1753 /* We intentionally don't set "activity" to 1, here. */
1802 } 1754 }
1755 assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
1756 assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
1803 if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) { 1757 if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) {
1804 if (bcm43xx_using_pio(bcm)) 1758 if (bcm43xx_using_pio(bcm))
1805 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3); 1759 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3);
1806 else 1760 else
1807 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring1); 1761 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring3);
1808 activity = 1; 1762 activity = 1;
1809 } 1763 }
1764 assert(!(dma_reason[4] & BCM43xx_DMAIRQ_RX_DONE));
1765 assert(!(dma_reason[5] & BCM43xx_DMAIRQ_RX_DONE));
1810 bcmirq_handled(BCM43xx_IRQ_RX); 1766 bcmirq_handled(BCM43xx_IRQ_RX);
1811 1767
1812 if (reason & BCM43xx_IRQ_XMIT_STATUS) { 1768 if (reason & BCM43xx_IRQ_XMIT_STATUS) {
@@ -1834,7 +1790,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1834 bcm43xx_leds_update(bcm, activity); 1790 bcm43xx_leds_update(bcm, activity);
1835 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate); 1791 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
1836 mmiowb(); 1792 mmiowb();
1837 bcm43xx_unlock_irqonly(bcm, flags); 1793 spin_unlock_irqrestore(&bcm->irq_lock, flags);
1838} 1794}
1839 1795
1840static void pio_irq_workaround(struct bcm43xx_private *bcm, 1796static void pio_irq_workaround(struct bcm43xx_private *bcm,
@@ -1863,14 +1819,18 @@ static void bcm43xx_interrupt_ack(struct bcm43xx_private *bcm, u32 reason)
1863 1819
1864 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason); 1820 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason);
1865 1821
1866 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON, 1822 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_REASON,
1867 bcm->dma_reason[0]); 1823 bcm->dma_reason[0]);
1868 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON, 1824 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON,
1869 bcm->dma_reason[1]); 1825 bcm->dma_reason[1]);
1870 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON, 1826 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON,
1871 bcm->dma_reason[2]); 1827 bcm->dma_reason[2]);
1872 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON, 1828 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON,
1873 bcm->dma_reason[3]); 1829 bcm->dma_reason[3]);
1830 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON,
1831 bcm->dma_reason[4]);
1832 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_REASON,
1833 bcm->dma_reason[5]);
1874} 1834}
1875 1835
1876/* Interrupt handler top-half */ 1836/* Interrupt handler top-half */
@@ -1885,14 +1845,8 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
1885 1845
1886 spin_lock(&bcm->irq_lock); 1846 spin_lock(&bcm->irq_lock);
1887 1847
1888 /* Only accept IRQs, if we are initialized properly. 1848 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
1889 * This avoids an RX race while initializing. 1849 assert(bcm->current_core->id == BCM43xx_COREID_80211);
1890 * We should probably not enable IRQs before we are initialized
1891 * completely, but some careful work is needed to fix this. I think it
1892 * is best to stay with this cheap workaround for now... .
1893 */
1894 if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED))
1895 goto out;
1896 1850
1897 reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); 1851 reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
1898 if (reason == 0xffffffff) { 1852 if (reason == 0xffffffff) {
@@ -1904,14 +1858,18 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
1904 if (!reason) 1858 if (!reason)
1905 goto out; 1859 goto out;
1906 1860
1907 bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON) 1861 bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON)
1908 & 0x0001dc00; 1862 & 0x0001DC00;
1909 bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON) 1863 bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
1910 & 0x0000dc00; 1864 & 0x0000DC00;
1911 bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON) 1865 bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON)
1912 & 0x0000dc00; 1866 & 0x0000DC00;
1913 bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON) 1867 bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON)
1914 & 0x0001dc00; 1868 & 0x0001DC00;
1869 bcm->dma_reason[4] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON)
1870 & 0x0000DC00;
1871 bcm->dma_reason[5] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA5_REASON)
1872 & 0x0000DC00;
1915 1873
1916 bcm43xx_interrupt_ack(bcm, reason); 1874 bcm43xx_interrupt_ack(bcm, reason);
1917 1875
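
[Editor's note] The handler above reads engines 0 and 3 with mask 0x0001DC00 but engines 1, 2, 4 and 5 with 0x0000DC00. The difference is exactly bit 16, BCM43xx_DMAIRQ_RX_DONE (defined in bcm43xx_dma.h above): only queues 0 and 3 carry received frames, which is also what the asserts in the tasklet enforce. A quick check of the bit layout:

#include <assert.h>

int main(void)
{
	const unsigned rx_done = 1u << 16;	/* BCM43xx_DMAIRQ_RX_DONE */

	assert((0x0001DC00u & rx_done) != 0);	/* engines 0 and 3: RX allowed */
	assert((0x0000DC00u & rx_done) == 0);	/* engines 1,2,4,5: RX masked */
	assert(0x0001DC00u == (0x0000DC00u | rx_done));
	return 0;
}
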
@@ -1930,16 +1888,18 @@ out:
1930 1888
1931static void bcm43xx_release_firmware(struct bcm43xx_private *bcm, int force) 1889static void bcm43xx_release_firmware(struct bcm43xx_private *bcm, int force)
1932{ 1890{
1891 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1892
1933 if (bcm->firmware_norelease && !force) 1893 if (bcm->firmware_norelease && !force)
1934 return; /* Suspending or controller reset. */ 1894 return; /* Suspending or controller reset. */
1935 release_firmware(bcm->ucode); 1895 release_firmware(phy->ucode);
1936 bcm->ucode = NULL; 1896 phy->ucode = NULL;
1937 release_firmware(bcm->pcm); 1897 release_firmware(phy->pcm);
1938 bcm->pcm = NULL; 1898 phy->pcm = NULL;
1939 release_firmware(bcm->initvals0); 1899 release_firmware(phy->initvals0);
1940 bcm->initvals0 = NULL; 1900 phy->initvals0 = NULL;
1941 release_firmware(bcm->initvals1); 1901 release_firmware(phy->initvals1);
1942 bcm->initvals1 = NULL; 1902 phy->initvals1 = NULL;
1943} 1903}
1944 1904
1945static int bcm43xx_request_firmware(struct bcm43xx_private *bcm) 1905static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
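
[Editor's note] Every hunk in this function makes the same substitution: the firmware blobs move from bcm->... to phy->..., so each 802.11 core keeps its own image set and bcm43xx_release_firmware() frees the set belonging to the current PHY. A sketch of the per-PHY container implied by the field names (the struct and function names here are illustrative; the real fields live in bcm43xx_phyinfo):

struct firmware;	/* opaque; the kernel type from linux/firmware.h */

struct phy_firmware_set {
	const struct firmware *ucode;      /* bcm43xx_microcode<rev>.fw */
	const struct firmware *pcm;        /* bcm43xx_pcm<4|5>.fw */
	const struct firmware *initvals0;  /* bcm43xx_initval<nr>.fw */
	const struct firmware *initvals1;  /* second set, rev >= 5 cores only */
};

static void release_set(struct phy_firmware_set *fw)
{
	/* Mirrors bcm43xx_release_firmware(): drop each image and clear
	 * the pointer so a later request_firmware() reloads it. In the
	 * kernel each NULL assignment is preceded by release_firmware(). */
	fw->ucode = 0;
	fw->pcm = 0;
	fw->initvals0 = 0;
	fw->initvals1 = 0;
}

int main(void)
{
	struct phy_firmware_set fw = { 0 };

	release_set(&fw);	/* safe on an empty set, as in the driver */
	return 0;
}
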
@@ -1950,11 +1910,11 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
1950 int nr; 1910 int nr;
1951 char buf[22 + sizeof(modparam_fwpostfix) - 1] = { 0 }; 1911 char buf[22 + sizeof(modparam_fwpostfix) - 1] = { 0 };
1952 1912
1953 if (!bcm->ucode) { 1913 if (!phy->ucode) {
1954 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_microcode%d%s.fw", 1914 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_microcode%d%s.fw",
1955 (rev >= 5 ? 5 : rev), 1915 (rev >= 5 ? 5 : rev),
1956 modparam_fwpostfix); 1916 modparam_fwpostfix);
1957 err = request_firmware(&bcm->ucode, buf, &bcm->pci_dev->dev); 1917 err = request_firmware(&phy->ucode, buf, &bcm->pci_dev->dev);
1958 if (err) { 1918 if (err) {
1959 printk(KERN_ERR PFX 1919 printk(KERN_ERR PFX
1960 "Error: Microcode \"%s\" not available or load failed.\n", 1920 "Error: Microcode \"%s\" not available or load failed.\n",
@@ -1963,12 +1923,12 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
1963 } 1923 }
1964 } 1924 }
1965 1925
1966 if (!bcm->pcm) { 1926 if (!phy->pcm) {
1967 snprintf(buf, ARRAY_SIZE(buf), 1927 snprintf(buf, ARRAY_SIZE(buf),
1968 "bcm43xx_pcm%d%s.fw", 1928 "bcm43xx_pcm%d%s.fw",
1969 (rev < 5 ? 4 : 5), 1929 (rev < 5 ? 4 : 5),
1970 modparam_fwpostfix); 1930 modparam_fwpostfix);
1971 err = request_firmware(&bcm->pcm, buf, &bcm->pci_dev->dev); 1931 err = request_firmware(&phy->pcm, buf, &bcm->pci_dev->dev);
1972 if (err) { 1932 if (err) {
1973 printk(KERN_ERR PFX 1933 printk(KERN_ERR PFX
1974 "Error: PCM \"%s\" not available or load failed.\n", 1934 "Error: PCM \"%s\" not available or load failed.\n",
@@ -1977,7 +1937,7 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
1977 } 1937 }
1978 } 1938 }
1979 1939
1980 if (!bcm->initvals0) { 1940 if (!phy->initvals0) {
1981 if (rev == 2 || rev == 4) { 1941 if (rev == 2 || rev == 4) {
1982 switch (phy->type) { 1942 switch (phy->type) {
1983 case BCM43xx_PHYTYPE_A: 1943 case BCM43xx_PHYTYPE_A:
@@ -2008,20 +1968,20 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
2008 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_initval%02d%s.fw", 1968 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_initval%02d%s.fw",
2009 nr, modparam_fwpostfix); 1969 nr, modparam_fwpostfix);
2010 1970
2011 err = request_firmware(&bcm->initvals0, buf, &bcm->pci_dev->dev); 1971 err = request_firmware(&phy->initvals0, buf, &bcm->pci_dev->dev);
2012 if (err) { 1972 if (err) {
2013 printk(KERN_ERR PFX 1973 printk(KERN_ERR PFX
2014 "Error: InitVals \"%s\" not available or load failed.\n", 1974 "Error: InitVals \"%s\" not available or load failed.\n",
2015 buf); 1975 buf);
2016 goto error; 1976 goto error;
2017 } 1977 }
2018 if (bcm->initvals0->size % sizeof(struct bcm43xx_initval)) { 1978 if (phy->initvals0->size % sizeof(struct bcm43xx_initval)) {
2019 printk(KERN_ERR PFX "InitVals fileformat error.\n"); 1979 printk(KERN_ERR PFX "InitVals fileformat error.\n");
2020 goto error; 1980 goto error;
2021 } 1981 }
2022 } 1982 }
2023 1983
2024 if (!bcm->initvals1) { 1984 if (!phy->initvals1) {
2025 if (rev >= 5) { 1985 if (rev >= 5) {
2026 u32 sbtmstatehigh; 1986 u32 sbtmstatehigh;
2027 1987
@@ -2043,14 +2003,14 @@ static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
2043 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_initval%02d%s.fw", 2003 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_initval%02d%s.fw",
2044 nr, modparam_fwpostfix); 2004 nr, modparam_fwpostfix);
2045 2005
2046 err = request_firmware(&bcm->initvals1, buf, &bcm->pci_dev->dev); 2006 err = request_firmware(&phy->initvals1, buf, &bcm->pci_dev->dev);
2047 if (err) { 2007 if (err) {
2048 printk(KERN_ERR PFX 2008 printk(KERN_ERR PFX
2049 "Error: InitVals \"%s\" not available or load failed.\n", 2009 "Error: InitVals \"%s\" not available or load failed.\n",
2050 buf); 2010 buf);
2051 goto error; 2011 goto error;
2052 } 2012 }
2053 if (bcm->initvals1->size % sizeof(struct bcm43xx_initval)) { 2013 if (phy->initvals1->size % sizeof(struct bcm43xx_initval)) {
2054 printk(KERN_ERR PFX "InitVals fileformat error.\n"); 2014 printk(KERN_ERR PFX "InitVals fileformat error.\n");
2055 goto error; 2015 goto error;
2056 } 2016 }
@@ -2070,12 +2030,13 @@ err_noinitval:
2070 2030
2071static void bcm43xx_upload_microcode(struct bcm43xx_private *bcm) 2031static void bcm43xx_upload_microcode(struct bcm43xx_private *bcm)
2072{ 2032{
2033 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2073 const u32 *data; 2034 const u32 *data;
2074 unsigned int i, len; 2035 unsigned int i, len;
2075 2036
2076 /* Upload Microcode. */ 2037 /* Upload Microcode. */
2077 data = (u32 *)(bcm->ucode->data); 2038 data = (u32 *)(phy->ucode->data);
2078 len = bcm->ucode->size / sizeof(u32); 2039 len = phy->ucode->size / sizeof(u32);
2079 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_UCODE, 0x0000); 2040 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_UCODE, 0x0000);
2080 for (i = 0; i < len; i++) { 2041 for (i = 0; i < len; i++) {
2081 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA, 2042 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA,
@@ -2084,8 +2045,8 @@ static void bcm43xx_upload_microcode(struct bcm43xx_private *bcm)
2084 } 2045 }
2085 2046
2086 /* Upload PCM data. */ 2047 /* Upload PCM data. */
2087 data = (u32 *)(bcm->pcm->data); 2048 data = (u32 *)(phy->pcm->data);
2088 len = bcm->pcm->size / sizeof(u32); 2049 len = phy->pcm->size / sizeof(u32);
2089 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_PCM, 0x01ea); 2050 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_PCM, 0x01ea);
2090 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA, 0x00004000); 2051 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA, 0x00004000);
2091 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_PCM, 0x01eb); 2052 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_PCM, 0x01eb);
@@ -2131,15 +2092,16 @@ err_format:
2131 2092
2132static int bcm43xx_upload_initvals(struct bcm43xx_private *bcm) 2093static int bcm43xx_upload_initvals(struct bcm43xx_private *bcm)
2133{ 2094{
2095 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2134 int err; 2096 int err;
2135 2097
2136 err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)bcm->initvals0->data, 2098 err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)phy->initvals0->data,
2137 bcm->initvals0->size / sizeof(struct bcm43xx_initval)); 2099 phy->initvals0->size / sizeof(struct bcm43xx_initval));
2138 if (err) 2100 if (err)
2139 goto out; 2101 goto out;
2140 if (bcm->initvals1) { 2102 if (phy->initvals1) {
2141 err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)bcm->initvals1->data, 2103 err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)phy->initvals1->data,
2142 bcm->initvals1->size / sizeof(struct bcm43xx_initval)); 2104 phy->initvals1->size / sizeof(struct bcm43xx_initval));
2143 if (err) 2105 if (err)
2144 goto out; 2106 goto out;
2145 } 2107 }
@@ -2156,9 +2118,7 @@ static struct pci_device_id bcm43xx_47xx_ids[] = {
2156 2118
2157static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm) 2119static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
2158{ 2120{
2159 int res; 2121 int err;
2160 unsigned int i;
2161 u32 data;
2162 2122
2163 bcm->irq = bcm->pci_dev->irq; 2123 bcm->irq = bcm->pci_dev->irq;
2164#ifdef CONFIG_BCM947XX 2124#ifdef CONFIG_BCM947XX
@@ -2175,32 +2135,12 @@ static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
2175 } 2135 }
2176 } 2136 }
2177#endif 2137#endif
2178 res = request_irq(bcm->irq, bcm43xx_interrupt_handler, 2138 err = request_irq(bcm->irq, bcm43xx_interrupt_handler,
2179 IRQF_SHARED, KBUILD_MODNAME, bcm); 2139 IRQF_SHARED, KBUILD_MODNAME, bcm);
2180 if (res) { 2140 if (err)
2181 printk(KERN_ERR PFX "Cannot register IRQ%d\n", bcm->irq); 2141 printk(KERN_ERR PFX "Cannot register IRQ%d\n", bcm->irq);
2182 return -ENODEV;
2183 }
2184 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0xffffffff);
2185 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 0x00020402);
2186 i = 0;
2187 while (1) {
2188 data = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
2189 if (data == BCM43xx_IRQ_READY)
2190 break;
2191 i++;
2192 if (i >= BCM43xx_IRQWAIT_MAX_RETRIES) {
2193 printk(KERN_ERR PFX "Card IRQ register not responding. "
2194 "Giving up.\n");
2195 free_irq(bcm->irq, bcm);
2196 return -ENODEV;
2197 }
2198 udelay(10);
2199 }
2200 // dummy read
2201 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
2202 2142
2203 return 0; 2143 return err;
2204} 2144}
2205 2145
2206/* Switch to the core used to write the GPIO register. 2146/* Switch to the core used to write the GPIO register.
@@ -2298,13 +2238,17 @@ static int bcm43xx_gpio_cleanup(struct bcm43xx_private *bcm)
2298/* http://bcm-specs.sipsolutions.net/EnableMac */ 2238/* http://bcm-specs.sipsolutions.net/EnableMac */
2299void bcm43xx_mac_enable(struct bcm43xx_private *bcm) 2239void bcm43xx_mac_enable(struct bcm43xx_private *bcm)
2300{ 2240{
2301 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 2241 bcm->mac_suspended--;
2302 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD) 2242 assert(bcm->mac_suspended >= 0);
2303 | BCM43xx_SBF_MAC_ENABLED); 2243 if (bcm->mac_suspended == 0) {
2304 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, BCM43xx_IRQ_READY); 2244 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
2305 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */ 2245 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
2306 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */ 2246 | BCM43xx_SBF_MAC_ENABLED);
2307 bcm43xx_power_saving_ctl_bits(bcm, -1, -1); 2247 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, BCM43xx_IRQ_READY);
2248 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */
2249 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
2250 bcm43xx_power_saving_ctl_bits(bcm, -1, -1);
2251 }
2308} 2252}
2309 2253
2310/* http://bcm-specs.sipsolutions.net/SuspendMAC */ 2254/* http://bcm-specs.sipsolutions.net/SuspendMAC */
@@ -2313,18 +2257,23 @@ void bcm43xx_mac_suspend(struct bcm43xx_private *bcm)
2313 int i; 2257 int i;
2314 u32 tmp; 2258 u32 tmp;
2315 2259
2316 bcm43xx_power_saving_ctl_bits(bcm, -1, 1); 2260 assert(bcm->mac_suspended >= 0);
2317 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 2261 if (bcm->mac_suspended == 0) {
2318 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD) 2262 bcm43xx_power_saving_ctl_bits(bcm, -1, 1);
2319 & ~BCM43xx_SBF_MAC_ENABLED); 2263 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
2320 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */ 2264 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
2321 for (i = 100000; i; i--) { 2265 & ~BCM43xx_SBF_MAC_ENABLED);
2322 tmp = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); 2266 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
2323 if (tmp & BCM43xx_IRQ_READY) 2267 for (i = 10000; i; i--) {
2324 return; 2268 tmp = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
2325 udelay(10); 2269 if (tmp & BCM43xx_IRQ_READY)
2270 goto out;
2271 udelay(1);
2272 }
2273 printkl(KERN_ERR PFX "MAC suspend failed\n");
2326 } 2274 }
2327 printkl(KERN_ERR PFX "MAC suspend failed\n"); 2275out:
2276 bcm->mac_suspended++;
2328} 2277}
2329 2278
2330void bcm43xx_set_iwmode(struct bcm43xx_private *bcm, 2279void bcm43xx_set_iwmode(struct bcm43xx_private *bcm,
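
[Editor's note] With the two hunks above, mac_suspend()/mac_enable() become nestable through the mac_suspended counter: only the 0-to-1 suspend and the matching 1-to-0 enable touch the hardware, so callers such as the periodic work can suspend the MAC without knowing whether it is already suspended. A user-space model of the counting:

#include <assert.h>
#include <stdio.h>

static int mac_suspended;
static int hw_enabled = 1;

static void mac_suspend(void)
{
	assert(mac_suspended >= 0);
	if (mac_suspended == 0)
		hw_enabled = 0;	/* driver: clear SBF_MAC_ENABLED, poll IRQ_READY */
	mac_suspended++;
}

static void mac_enable(void)
{
	mac_suspended--;
	assert(mac_suspended >= 0);
	if (mac_suspended == 0)
		hw_enabled = 1;	/* driver: set SBF_MAC_ENABLED, dummy reads */
}

int main(void)
{
	mac_suspend();
	mac_suspend();		/* nested: no extra hardware access */
	mac_enable();
	assert(!hw_enabled);	/* one suspend level still outstanding */
	mac_enable();
	assert(hw_enabled);
	puts("nesting behaves as the counter in the hunks above");
	return 0;
}
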
@@ -2394,7 +2343,6 @@ static void bcm43xx_chip_cleanup(struct bcm43xx_private *bcm)
2394 if (!modparam_noleds) 2343 if (!modparam_noleds)
2395 bcm43xx_leds_exit(bcm); 2344 bcm43xx_leds_exit(bcm);
2396 bcm43xx_gpio_cleanup(bcm); 2345 bcm43xx_gpio_cleanup(bcm);
2397 free_irq(bcm->irq, bcm);
2398 bcm43xx_release_firmware(bcm, 0); 2346 bcm43xx_release_firmware(bcm, 0);
2399} 2347}
2400 2348
@@ -2406,7 +2354,7 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
2406 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm); 2354 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
2407 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 2355 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2408 int err; 2356 int err;
2409 int tmp; 2357 int i, tmp;
2410 u32 value32; 2358 u32 value32;
2411 u16 value16; 2359 u16 value16;
2412 2360
@@ -2419,13 +2367,53 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
2419 goto out; 2367 goto out;
2420 bcm43xx_upload_microcode(bcm); 2368 bcm43xx_upload_microcode(bcm);
2421 2369
2422 err = bcm43xx_initialize_irq(bcm); 2370 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0xFFFFFFFF);
2423 if (err) 2371 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 0x00020402);
2372 i = 0;
2373 while (1) {
2374 value32 = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
2375 if (value32 == BCM43xx_IRQ_READY)
2376 break;
2377 i++;
2378 if (i >= BCM43xx_IRQWAIT_MAX_RETRIES) {
2379 printk(KERN_ERR PFX "IRQ_READY timeout\n");
2380 err = -ENODEV;
2381 goto err_release_fw;
2382 }
2383 udelay(10);
2384 }
2385 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
2386
2387 value16 = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2388 BCM43xx_UCODE_REVISION);
2389
2390 dprintk(KERN_INFO PFX "Microcode rev 0x%x, pl 0x%x "
2391 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", value16,
2392 bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2393 BCM43xx_UCODE_PATCHLEVEL),
2394 (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2395 BCM43xx_UCODE_DATE) >> 12) & 0xf,
2396 (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2397 BCM43xx_UCODE_DATE) >> 8) & 0xf,
2398 bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2399 BCM43xx_UCODE_DATE) & 0xff,
2400 (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2401 BCM43xx_UCODE_TIME) >> 11) & 0x1f,
2402 (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2403 BCM43xx_UCODE_TIME) >> 5) & 0x3f,
2404 bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2405 BCM43xx_UCODE_TIME) & 0x1f);
2406
2407 if (value16 > 0x128) {
2408 dprintk(KERN_ERR PFX
2409 "Firmware: no support for microcode rev > 0x128\n");
2410 err = -1;
2424 goto err_release_fw; 2411 goto err_release_fw;
2412 }
2425 2413
2426 err = bcm43xx_gpio_init(bcm); 2414 err = bcm43xx_gpio_init(bcm);
2427 if (err) 2415 if (err)
2428 goto err_free_irq; 2416 goto err_release_fw;
2429 2417
2430 err = bcm43xx_upload_initvals(bcm); 2418 err = bcm43xx_upload_initvals(bcm);
2431 if (err) 2419 if (err)
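
[Editor's note] The dprintk in the previous hunk unpacks the microcode build stamp with fixed shifts and masks: the date word packs year-since-2000/month/day into 4+4+8 bits, and the time word packs hour/minute/seconds-field into 5+6+5 bits. The same decoding as a standalone function (the sample readback values are invented):

#include <stdio.h>

static void print_ucode_stamp(unsigned date, unsigned time)
{
	printf("20%02u-%02u-%02u %02u:%02u:%02u\n",
	       (date >> 12) & 0xFu,	/* year - 2000 */
	       (date >> 8) & 0xFu,	/* month */
	       date & 0xFFu,		/* day */
	       (time >> 11) & 0x1Fu,	/* hour */
	       (time >> 5) & 0x3Fu,	/* minute */
	       time & 0x1Fu);		/* seconds field as the driver prints it */
}

int main(void)
{
	print_ucode_stamp(0x5A0F, 0x5B2A);	/* prints 2005-10-15 11:25:10 */
	return 0;
}
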
@@ -2489,10 +2477,12 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
2489 bcm43xx_write32(bcm, 0x018C, 0x02000000); 2477 bcm43xx_write32(bcm, 0x018C, 0x02000000);
2490 } 2478 }
2491 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000); 2479 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000);
2492 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0001DC00); 2480 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
2481 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
2493 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00); 2482 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
2494 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0000DC00); 2483 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
2495 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0001DC00); 2484 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
2485 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
2496 2486
2497 value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW); 2487 value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
2498 value32 |= 0x00100000; 2488 value32 |= 0x00100000;
@@ -2509,8 +2499,6 @@ err_radio_off:
2509 bcm43xx_radio_turn_off(bcm); 2499 bcm43xx_radio_turn_off(bcm);
2510err_gpio_cleanup: 2500err_gpio_cleanup:
2511 bcm43xx_gpio_cleanup(bcm); 2501 bcm43xx_gpio_cleanup(bcm);
2512err_free_irq:
2513 free_irq(bcm->irq, bcm);
2514err_release_fw: 2502err_release_fw:
2515 bcm43xx_release_firmware(bcm, 1); 2503 bcm43xx_release_firmware(bcm, 1);
2516 goto out; 2504 goto out;
@@ -2550,11 +2538,9 @@ static void bcm43xx_init_struct_phyinfo(struct bcm43xx_phyinfo *phy)
2550{ 2538{
2551 /* Initialize a "phyinfo" structure. The structure is already 2539 /* Initialize a "phyinfo" structure. The structure is already
2552 * zeroed out. 2540 * zeroed out.
2541 * This is called on insmod time to initialize members.
2553 */ 2542 */
2554 phy->antenna_diversity = 0xFFFF;
2555 phy->savedpctlreg = 0xFFFF; 2543 phy->savedpctlreg = 0xFFFF;
2556 phy->minlowsig[0] = 0xFFFF;
2557 phy->minlowsig[1] = 0xFFFF;
2558 spin_lock_init(&phy->lock); 2544 spin_lock_init(&phy->lock);
2559} 2545}
2560 2546
@@ -2562,14 +2548,11 @@ static void bcm43xx_init_struct_radioinfo(struct bcm43xx_radioinfo *radio)
2562{ 2548{
2563 /* Initialize a "radioinfo" structure. The structure is already 2549 /* Initialize a "radioinfo" structure. The structure is already
2564 * zeroed out. 2550 * zeroed out.
2551 * This is called on insmod time to initialize members.
2565 */ 2552 */
2566 radio->interfmode = BCM43xx_RADIO_INTERFMODE_NONE; 2553 radio->interfmode = BCM43xx_RADIO_INTERFMODE_NONE;
2567 radio->channel = 0xFF; 2554 radio->channel = 0xFF;
2568 radio->initial_channel = 0xFF; 2555 radio->initial_channel = 0xFF;
2569 radio->lofcal = 0xFFFF;
2570 radio->initval = 0xFFFF;
2571 radio->nrssi[0] = -1000;
2572 radio->nrssi[1] = -1000;
2573} 2556}
2574 2557
2575static int bcm43xx_probe_cores(struct bcm43xx_private *bcm) 2558static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
@@ -2587,7 +2570,6 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
2587 * BCM43xx_MAX_80211_CORES); 2570 * BCM43xx_MAX_80211_CORES);
2588 memset(&bcm->core_80211_ext, 0, sizeof(struct bcm43xx_coreinfo_80211) 2571 memset(&bcm->core_80211_ext, 0, sizeof(struct bcm43xx_coreinfo_80211)
2589 * BCM43xx_MAX_80211_CORES); 2572 * BCM43xx_MAX_80211_CORES);
2590 bcm->current_80211_core_idx = -1;
2591 bcm->nr_80211_available = 0; 2573 bcm->nr_80211_available = 0;
2592 bcm->current_core = NULL; 2574 bcm->current_core = NULL;
2593 bcm->active_80211_core = NULL; 2575 bcm->active_80211_core = NULL;
@@ -2757,6 +2739,7 @@ static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
2757 goto out; 2739 goto out;
2758 } 2740 }
2759 bcm->nr_80211_available++; 2741 bcm->nr_80211_available++;
2742 core->priv = ext_80211;
2760 bcm43xx_init_struct_phyinfo(&ext_80211->phy); 2743 bcm43xx_init_struct_phyinfo(&ext_80211->phy);
2761 bcm43xx_init_struct_radioinfo(&ext_80211->radio); 2744 bcm43xx_init_struct_radioinfo(&ext_80211->radio);
2762 break; 2745 break;
@@ -2857,7 +2840,8 @@ static void bcm43xx_wireless_core_cleanup(struct bcm43xx_private *bcm)
2857} 2840}
2858 2841
2859/* http://bcm-specs.sipsolutions.net/80211Init */ 2842/* http://bcm-specs.sipsolutions.net/80211Init */
2860static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm) 2843static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm,
2844 int active_wlcore)
2861{ 2845{
2862 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 2846 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2863 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm); 2847 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
@@ -2939,19 +2923,26 @@ static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm)
2939 if (bcm->current_core->rev >= 5) 2923 if (bcm->current_core->rev >= 5)
2940 bcm43xx_write16(bcm, 0x043C, 0x000C); 2924 bcm43xx_write16(bcm, 0x043C, 0x000C);
2941 2925
2942 if (bcm43xx_using_pio(bcm)) 2926 if (active_wlcore) {
2943 err = bcm43xx_pio_init(bcm); 2927 if (bcm43xx_using_pio(bcm))
2944 else 2928 err = bcm43xx_pio_init(bcm);
2945 err = bcm43xx_dma_init(bcm); 2929 else
2946 if (err) 2930 err = bcm43xx_dma_init(bcm);
2947 goto err_chip_cleanup; 2931 if (err)
2932 goto err_chip_cleanup;
2933 }
2948 bcm43xx_write16(bcm, 0x0612, 0x0050); 2934 bcm43xx_write16(bcm, 0x0612, 0x0050);
2949 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0416, 0x0050); 2935 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0416, 0x0050);
2950 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0414, 0x01F4); 2936 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0414, 0x01F4);
2951 2937
2952 bcm43xx_mac_enable(bcm); 2938 if (active_wlcore) {
2953 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate); 2939 if (radio->initial_channel != 0xFF)
2940 bcm43xx_radio_selectchannel(bcm, radio->initial_channel, 0);
2941 }
2954 2942
2943 /* Don't enable MAC/IRQ here, as it will race with the IRQ handler.
2944 * We enable it later.
2945 */
2955 bcm->current_core->initialized = 1; 2946 bcm->current_core->initialized = 1;
2956out: 2947out:
2957 return err; 2948 return err;
@@ -3066,11 +3057,6 @@ out:
3066 return err; 3057 return err;
3067} 3058}
3068 3059
3069static void bcm43xx_softmac_init(struct bcm43xx_private *bcm)
3070{
3071 ieee80211softmac_start(bcm->net_dev);
3072}
3073
3074static void bcm43xx_periodic_every120sec(struct bcm43xx_private *bcm) 3060static void bcm43xx_periodic_every120sec(struct bcm43xx_private *bcm)
3075{ 3061{
3076 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 3062 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
@@ -3182,47 +3168,46 @@ static void bcm43xx_periodic_work_handler(void *d)
3182 /* Periodic work will take a long time, so we want it to 3168 /* Periodic work will take a long time, so we want it to
3183 * be preemptible. 3169 * be preemptible.
3184 */ 3170 */
3185 bcm43xx_lock_irqonly(bcm, flags); 3171 mutex_lock(&bcm->mutex);
3186 netif_stop_queue(bcm->net_dev); 3172 netif_stop_queue(bcm->net_dev);
3173 synchronize_net();
3174 spin_lock_irqsave(&bcm->irq_lock, flags);
3175 bcm43xx_mac_suspend(bcm);
3187 if (bcm43xx_using_pio(bcm)) 3176 if (bcm43xx_using_pio(bcm))
3188 bcm43xx_pio_freeze_txqueues(bcm); 3177 bcm43xx_pio_freeze_txqueues(bcm);
3189 savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); 3178 savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
3190 bcm43xx_unlock_irqonly(bcm, flags); 3179 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3191 bcm43xx_lock_noirq(bcm);
3192 bcm43xx_synchronize_irq(bcm); 3180 bcm43xx_synchronize_irq(bcm);
3193 } else { 3181 } else {
3194 /* Periodic work should take short time, so we want low 3182 /* Periodic work should take short time, so we want low
3195 * locking overhead. 3183 * locking overhead.
3196 */ 3184 */
3197 bcm43xx_lock_irqsafe(bcm, flags); 3185 mutex_lock(&bcm->mutex);
3186 spin_lock_irqsave(&bcm->irq_lock, flags);
3198 } 3187 }
3199 3188
3200 do_periodic_work(bcm); 3189 do_periodic_work(bcm);
3201 3190
3202 if (badness > BADNESS_LIMIT) { 3191 if (badness > BADNESS_LIMIT) {
3203 bcm43xx_lock_irqonly(bcm, flags); 3192 spin_lock_irqsave(&bcm->irq_lock, flags);
3204 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) { 3193 tasklet_enable(&bcm->isr_tasklet);
3205 tasklet_enable(&bcm->isr_tasklet); 3194 bcm43xx_interrupt_enable(bcm, savedirqs);
3206 bcm43xx_interrupt_enable(bcm, savedirqs); 3195 if (bcm43xx_using_pio(bcm))
3207 if (bcm43xx_using_pio(bcm)) 3196 bcm43xx_pio_thaw_txqueues(bcm);
3208 bcm43xx_pio_thaw_txqueues(bcm); 3197 bcm43xx_mac_enable(bcm);
3209 }
3210 netif_wake_queue(bcm->net_dev); 3198 netif_wake_queue(bcm->net_dev);
3211 mmiowb();
3212 bcm43xx_unlock_irqonly(bcm, flags);
3213 bcm43xx_unlock_noirq(bcm);
3214 } else {
3215 mmiowb();
3216 bcm43xx_unlock_irqsafe(bcm, flags);
3217 } 3199 }
3200 mmiowb();
3201 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3202 mutex_unlock(&bcm->mutex);
3218} 3203}
3219 3204
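The hunk above replaces the driver's old bcm43xx_lock_irqonly()/bcm43xx_lock_irqsafe() wrappers with an explicit two-level scheme: bcm->mutex serializes the long, sleepable part of the periodic work, and the bcm->irq_lock spinlock is held only around the short window that races with the interrupt handler. A minimal sketch of that lock ordering; the struct mydev and mydev_heavy_work() names are hypothetical, only the ordering itself is taken from the patch:

#include <linux/mutex.h>
#include <linux/spinlock.h>

struct mydev {
	struct mutex mutex;	/* taken first: serializes sleepable paths */
	spinlock_t irq_lock;	/* taken second: guards ISR-shared state */
};

static void mydev_heavy_work(struct mydev *dev)
{
	unsigned long flags;

	mutex_lock(&dev->mutex);		/* may sleep */
	/* ... long, preemptible work ... */
	spin_lock_irqsave(&dev->irq_lock, flags);
	/* ... short critical section shared with the IRQ handler ... */
	spin_unlock_irqrestore(&dev->irq_lock, flags);
	mutex_unlock(&dev->mutex);		/* release in reverse order */
}

Taking the mutex before the spinlock, and never the other way around, is what keeps the two paths in the handler above deadlock-free.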
3220static void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm) 3205void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
3221{ 3206{
3222 cancel_rearming_delayed_work(&bcm->periodic_work); 3207 cancel_rearming_delayed_work(&bcm->periodic_work);
3223} 3208}
3224 3209
3225static void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm) 3210void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
3226{ 3211{
3227 struct work_struct *work = &(bcm->periodic_work); 3212 struct work_struct *work = &(bcm->periodic_work);
3228 3213
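bcm43xx_periodic_tasks_setup() and _delete(), now exported above, wrap the workqueue API of this kernel generation, in which a handler that reschedules itself is torn down with cancel_rearming_delayed_work(). A sketch under that assumption; the 15-second interval and the mydev_* names are illustrative, not taken from the driver:

#include <linux/workqueue.h>

/* Handler signature of this era: it receives the void *data that
 * was passed as the third argument of INIT_WORK(). */
static void mydev_periodic_work(void *d)
{
	struct mydev *dev = d;

	/* ... periodic maintenance ... */

	/* Re-arm: run again on the shared workqueue later. */
	schedule_delayed_work(&dev->periodic_work, 15 * HZ);
}

static void mydev_periodic_tasks_setup(struct mydev *dev)
{
	INIT_WORK(&dev->periodic_work, mydev_periodic_work, dev);
	schedule_delayed_work(&dev->periodic_work, 0);
}

static void mydev_periodic_tasks_delete(struct mydev *dev)
{
	/* Safe even while the handler is rescheduling itself. */
	cancel_rearming_delayed_work(&dev->periodic_work);
}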
@@ -3243,9 +3228,9 @@ static int bcm43xx_rng_read(struct hwrng *rng, u32 *data)
3243 struct bcm43xx_private *bcm = (struct bcm43xx_private *)rng->priv; 3228 struct bcm43xx_private *bcm = (struct bcm43xx_private *)rng->priv;
3244 unsigned long flags; 3229 unsigned long flags;
3245 3230
3246 bcm43xx_lock_irqonly(bcm, flags); 3231 spin_lock_irqsave(&(bcm)->irq_lock, flags);
3247 *data = bcm43xx_read16(bcm, BCM43xx_MMIO_RNG); 3232 *data = bcm43xx_read16(bcm, BCM43xx_MMIO_RNG);
3248 bcm43xx_unlock_irqonly(bcm, flags); 3233 spin_unlock_irqrestore(&(bcm)->irq_lock, flags);
3249 3234
3250 return (sizeof(u16)); 3235 return (sizeof(u16));
3251} 3236}
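For context, the function above is the data_read hook of the hw_random framework, so the driver takes only the irq_lock around the single MMIO read. A sketch of how such a hook is registered; the field names follow the hwrng API of this kernel generation, and the mydev_* helpers are hypothetical:

#include <linux/hw_random.h>

static int mydev_rng_read(struct hwrng *rng, u32 *data)
{
	struct mydev *dev = (struct mydev *)rng->priv;
	unsigned long flags;

	spin_lock_irqsave(&dev->irq_lock, flags);
	*data = mydev_read16(dev, MYDEV_MMIO_RNG);	/* hypothetical MMIO read */
	spin_unlock_irqrestore(&dev->irq_lock, flags);

	return sizeof(u16);	/* number of valid bytes placed in *data */
}

static int mydev_rng_init(struct mydev *dev)
{
	dev->rng.name = "mydev";
	dev->rng.data_read = mydev_rng_read;
	dev->rng.priv = (unsigned long)dev;

	return hwrng_register(&dev->rng);
}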
@@ -3271,139 +3256,329 @@ static int bcm43xx_rng_init(struct bcm43xx_private *bcm)
3271 return err; 3256 return err;
3272} 3257}
3273 3258
3274/* This is the opposite of bcm43xx_init_board() */ 3259static int bcm43xx_shutdown_all_wireless_cores(struct bcm43xx_private *bcm)
3275static void bcm43xx_free_board(struct bcm43xx_private *bcm)
3276{ 3260{
3261 int ret = 0;
3277 int i, err; 3262 int i, err;
3263 struct bcm43xx_coreinfo *core;
3278 3264
3279 bcm43xx_lock_noirq(bcm); 3265 bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN);
3266 for (i = 0; i < bcm->nr_80211_available; i++) {
3267 core = &(bcm->core_80211[i]);
3268 assert(core->available);
3269 if (!core->initialized)
3270 continue;
3271 err = bcm43xx_switch_core(bcm, core);
3272 if (err) {
3273 dprintk(KERN_ERR PFX "shutdown_all_wireless_cores "
3274 "switch_core failed (%d)\n", err);
3275 ret = err;
3276 continue;
3277 }
3278 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
3279 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
3280 bcm43xx_wireless_core_cleanup(bcm);
3281 if (core == bcm->active_80211_core)
3282 bcm->active_80211_core = NULL;
3283 }
3284 free_irq(bcm->irq, bcm);
3285 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
3286
3287 return ret;
3288}
3289
3290/* This is the opposite of bcm43xx_init_board() */
3291static void bcm43xx_free_board(struct bcm43xx_private *bcm)
3292{
3293 bcm43xx_rng_exit(bcm);
3280 bcm43xx_sysfs_unregister(bcm); 3294 bcm43xx_sysfs_unregister(bcm);
3281 bcm43xx_periodic_tasks_delete(bcm); 3295 bcm43xx_periodic_tasks_delete(bcm);
3282 3296
3283 bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN); 3297 mutex_lock(&(bcm)->mutex);
3298 bcm43xx_shutdown_all_wireless_cores(bcm);
3299 bcm43xx_pctl_set_crystal(bcm, 0);
3300 mutex_unlock(&(bcm)->mutex);
3301}
3284 3302
3285 bcm43xx_rng_exit(bcm); 3303static void prepare_phydata_for_init(struct bcm43xx_phyinfo *phy)
3304{
3305 phy->antenna_diversity = 0xFFFF;
3306 memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
3307 memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos));
3308
3309 /* Flags */
3310 phy->calibrated = 0;
3311 phy->is_locked = 0;
3312
3313 if (phy->_lo_pairs) {
3314 memset(phy->_lo_pairs, 0,
3315 sizeof(struct bcm43xx_lopair) * BCM43xx_LO_COUNT);
3316 }
3317 memset(phy->loopback_gain, 0, sizeof(phy->loopback_gain));
3318}
3319
3320static void prepare_radiodata_for_init(struct bcm43xx_private *bcm,
3321 struct bcm43xx_radioinfo *radio)
3322{
3323 int i;
3324
3325 /* Set default attenuation values. */
3326 radio->baseband_atten = bcm43xx_default_baseband_attenuation(bcm);
3327 radio->radio_atten = bcm43xx_default_radio_attenuation(bcm);
3328 radio->txctl1 = bcm43xx_default_txctl1(bcm);
3329 radio->txctl2 = 0xFFFF;
3330 radio->txpwr_offset = 0;
3331
3332 /* NRSSI */
3333 radio->nrssislope = 0;
3334 for (i = 0; i < ARRAY_SIZE(radio->nrssi); i++)
3335 radio->nrssi[i] = -1000;
3336 for (i = 0; i < ARRAY_SIZE(radio->nrssi_lt); i++)
3337 radio->nrssi_lt[i] = i;
3338
3339 radio->lofcal = 0xFFFF;
3340 radio->initval = 0xFFFF;
3341
3342 radio->aci_enable = 0;
3343 radio->aci_wlan_automatic = 0;
3344 radio->aci_hw_rssi = 0;
3345}
3346
3347static void prepare_priv_for_init(struct bcm43xx_private *bcm)
3348{
3349 int i;
3350 struct bcm43xx_coreinfo *core;
3351 struct bcm43xx_coreinfo_80211 *wlext;
3352
3353 assert(!bcm->active_80211_core);
3354
3355 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
3356
3357 /* Flags */
3358 bcm->was_initialized = 0;
3359 bcm->reg124_set_0x4 = 0;
3360
3361 /* Stats */
3362 memset(&bcm->stats, 0, sizeof(bcm->stats));
3363
3364 /* Wireless core data */
3286 for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) { 3365 for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
3287 if (!bcm->core_80211[i].available) 3366 core = &(bcm->core_80211[i]);
3288 continue; 3367 wlext = core->priv;
3289 if (!bcm->core_80211[i].initialized) 3368
3369 if (!core->available)
3290 continue; 3370 continue;
3371 assert(wlext == &(bcm->core_80211_ext[i]));
3291 3372
3292 err = bcm43xx_switch_core(bcm, &bcm->core_80211[i]); 3373 prepare_phydata_for_init(&wlext->phy);
3293 assert(err == 0); 3374 prepare_radiodata_for_init(bcm, &wlext->radio);
3294 bcm43xx_wireless_core_cleanup(bcm);
3295 } 3375 }
3296 3376
3297 bcm43xx_pctl_set_crystal(bcm, 0); 3377 /* IRQ related flags */
3378 bcm->irq_reason = 0;
3379 memset(bcm->dma_reason, 0, sizeof(bcm->dma_reason));
3380 bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
3298 3381
3299 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT); 3382 bcm->mac_suspended = 1;
3300 bcm43xx_unlock_noirq(bcm); 3383
3384 /* Noise calculation context */
3385 memset(&bcm->noisecalc, 0, sizeof(bcm->noisecalc));
3386
3387 /* Periodic work context */
3388 bcm->periodic_state = 0;
3301} 3389}
3302 3390
3303static int bcm43xx_init_board(struct bcm43xx_private *bcm) 3391static int wireless_core_up(struct bcm43xx_private *bcm,
3392 int active_wlcore)
3393{
3394 int err;
3395
3396 if (!bcm43xx_core_enabled(bcm))
3397 bcm43xx_wireless_core_reset(bcm, 1);
3398 if (!active_wlcore)
3399 bcm43xx_wireless_core_mark_inactive(bcm);
3400 err = bcm43xx_wireless_core_init(bcm, active_wlcore);
3401 if (err)
3402 goto out;
3403 if (!active_wlcore)
3404 bcm43xx_radio_turn_off(bcm);
3405out:
3406 return err;
3407}
3408
3409/* Select and enable the "to be used" wireless core.
3410 * Locking: bcm->mutex must be acquired before calling this.
3411 * bcm->irq_lock must not be acquired.
3412 */
3413int bcm43xx_select_wireless_core(struct bcm43xx_private *bcm,
3414 int phytype)
3304{ 3415{
3305 int i, err; 3416 int i, err;
3306 int connect_phy; 3417 struct bcm43xx_coreinfo *active_core = NULL;
3418 struct bcm43xx_coreinfo_80211 *active_wlext = NULL;
3419 struct bcm43xx_coreinfo *core;
3420 struct bcm43xx_coreinfo_80211 *wlext;
3421 int adjust_active_sbtmstatelow = 0;
3307 3422
3308 might_sleep(); 3423 might_sleep();
3309 3424
3310 bcm43xx_lock_noirq(bcm); 3425 if (phytype < 0) {
3311 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING); 3426 /* If no phytype is requested, select the first core. */
3427 assert(bcm->core_80211[0].available);
3428 wlext = bcm->core_80211[0].priv;
3429 phytype = wlext->phy.type;
3430 }
3431 /* Find the requested core. */
3432 for (i = 0; i < bcm->nr_80211_available; i++) {
3433 core = &(bcm->core_80211[i]);
3434 wlext = core->priv;
3435 if (wlext->phy.type == phytype) {
3436 active_core = core;
3437 active_wlext = wlext;
3438 break;
3439 }
3440 }
3441 if (!active_core)
3442 return -ESRCH; /* No such PHYTYPE on this board. */
3443
3444 if (bcm->active_80211_core) {
3445 /* We already selected a wl core in the past.
3446 * So first clean up everything.
3447 */
3448 dprintk(KERN_INFO PFX "select_wireless_core: cleanup\n");
3449 ieee80211softmac_stop(bcm->net_dev);
3450 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED);
3451 err = bcm43xx_disable_interrupts_sync(bcm);
3452 assert(!err);
3453 tasklet_enable(&bcm->isr_tasklet);
3454 err = bcm43xx_shutdown_all_wireless_cores(bcm);
3455 if (err)
3456 goto error;
3457 /* Ok, everything down, continue to re-initialize. */
3458 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
3459 }
3460
3461 /* Reset all data structures. */
3462 prepare_priv_for_init(bcm);
3312 3463
3313 err = bcm43xx_pctl_set_crystal(bcm, 1);
3314 if (err)
3315 goto out;
3316 err = bcm43xx_pctl_init(bcm);
3317 if (err)
3318 goto err_crystal_off;
3319 err = bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_FAST); 3464 err = bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_FAST);
3320 if (err) 3465 if (err)
3321 goto err_crystal_off; 3466 goto error;
3322 3467
3323 tasklet_enable(&bcm->isr_tasklet); 3468 /* Mark all unused cores "inactive". */
3324 for (i = 0; i < bcm->nr_80211_available; i++) { 3469 for (i = 0; i < bcm->nr_80211_available; i++) {
3325 err = bcm43xx_switch_core(bcm, &bcm->core_80211[i]); 3470 core = &(bcm->core_80211[i]);
3326 assert(err != -ENODEV); 3471 wlext = core->priv;
3327 if (err)
3328 goto err_80211_unwind;
3329 3472
3330 /* Enable the selected wireless core. 3473 if (core == active_core)
3331 * Connect PHY only on the first core. 3474 continue;
3332 */ 3475 err = bcm43xx_switch_core(bcm, core);
3333 if (!bcm43xx_core_enabled(bcm)) { 3476 if (err) {
3334 if (bcm->nr_80211_available == 1) { 3477 dprintk(KERN_ERR PFX "Could not switch to inactive "
3335 connect_phy = bcm43xx_current_phy(bcm)->connected; 3478 "802.11 core (%d)\n", err);
3336 } else { 3479 goto error;
3337 if (i == 0)
3338 connect_phy = 1;
3339 else
3340 connect_phy = 0;
3341 }
3342 bcm43xx_wireless_core_reset(bcm, connect_phy);
3343 } 3480 }
3481 err = wireless_core_up(bcm, 0);
3482 if (err) {
3483 dprintk(KERN_ERR PFX "core_up for inactive 802.11 core "
3484 "failed (%d)\n", err);
3485 goto error;
3486 }
3487 adjust_active_sbtmstatelow = 1;
3488 }
3344 3489
3345 if (i != 0) 3490 /* Now initialize the active 802.11 core. */
3346 bcm43xx_wireless_core_mark_inactive(bcm, &bcm->core_80211[0]); 3491 err = bcm43xx_switch_core(bcm, active_core);
3347 3492 if (err) {
3348 err = bcm43xx_wireless_core_init(bcm); 3493 dprintk(KERN_ERR PFX "Could not switch to active "
3349 if (err) 3494 "802.11 core (%d)\n", err);
3350 goto err_80211_unwind; 3495 goto error;
3496 }
3497 if (adjust_active_sbtmstatelow &&
3498 active_wlext->phy.type == BCM43xx_PHYTYPE_G) {
3499 u32 sbtmstatelow;
3351 3500
3352 if (i != 0) { 3501 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
3353 bcm43xx_mac_suspend(bcm); 3502 sbtmstatelow |= 0x20000000;
3354 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); 3503 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
3355 bcm43xx_radio_turn_off(bcm);
3356 }
3357 } 3504 }
3358 bcm->active_80211_core = &bcm->core_80211[0]; 3505 err = wireless_core_up(bcm, 1);
3359 if (bcm->nr_80211_available >= 2) { 3506 if (err) {
3360 bcm43xx_switch_core(bcm, &bcm->core_80211[0]); 3507 dprintk(KERN_ERR PFX "core_up for active 802.11 core "
3361 bcm43xx_mac_enable(bcm); 3508 "failed (%d)\n", err);
3509 goto error;
3362 } 3510 }
3363 err = bcm43xx_rng_init(bcm); 3511 err = bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_DYNAMIC);
3364 if (err) 3512 if (err)
3365 goto err_80211_unwind; 3513 goto error;
3514 bcm->active_80211_core = active_core;
3515
3366 bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC); 3516 bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC);
3367 bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_SELF, (u8 *)(bcm->net_dev->dev_addr)); 3517 bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_SELF, (u8 *)(bcm->net_dev->dev_addr));
3368 dprintk(KERN_INFO PFX "80211 cores initialized\n");
3369 bcm43xx_security_init(bcm); 3518 bcm43xx_security_init(bcm);
3370 bcm43xx_softmac_init(bcm); 3519 ieee80211softmac_start(bcm->net_dev);
3371 3520
3372 bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_DYNAMIC); 3521 /* Let's go! Be careful after enabling the IRQs.
3522 * Don't switch cores, for example.
3523 */
3524 bcm43xx_mac_enable(bcm);
3525 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED);
3526 err = bcm43xx_initialize_irq(bcm);
3527 if (err)
3528 goto error;
3529 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
3373 3530
3374 if (bcm43xx_current_radio(bcm)->initial_channel != 0xFF) { 3531 dprintk(KERN_INFO PFX "Selected 802.11 core (phytype %d)\n",
3375 bcm43xx_mac_suspend(bcm); 3532 active_wlext->phy.type);
3376 bcm43xx_radio_selectchannel(bcm, bcm43xx_current_radio(bcm)->initial_channel, 0);
3377 bcm43xx_mac_enable(bcm);
3378 }
3379 3533
3380 /* Initialization of the board is done. Flag it as such. */ 3534 return 0;
3381 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED); 3535
3536error:
3537 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
3538 bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_SLOW);
3539 return err;
3540}
3382 3541
3542static int bcm43xx_init_board(struct bcm43xx_private *bcm)
3543{
3544 int err;
3545
3546 mutex_lock(&(bcm)->mutex);
3547
3548 tasklet_enable(&bcm->isr_tasklet);
3549 err = bcm43xx_pctl_set_crystal(bcm, 1);
3550 if (err)
3551 goto err_tasklet;
3552 err = bcm43xx_pctl_init(bcm);
3553 if (err)
3554 goto err_crystal_off;
3555 err = bcm43xx_select_wireless_core(bcm, -1);
3556 if (err)
3557 goto err_crystal_off;
3558 err = bcm43xx_sysfs_register(bcm);
3559 if (err)
3560 goto err_wlshutdown;
3561 err = bcm43xx_rng_init(bcm);
3562 if (err)
3563 goto err_sysfs_unreg;
3383 bcm43xx_periodic_tasks_setup(bcm); 3564 bcm43xx_periodic_tasks_setup(bcm);
3384 bcm43xx_sysfs_register(bcm);
3385 //FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though...
3386 3565
3387 /*FIXME: This should be handled by softmac instead. */ 3566 /*FIXME: This should be handled by softmac instead. */
3388 schedule_work(&bcm->softmac->associnfo.work); 3567 schedule_work(&bcm->softmac->associnfo.work);
3389 3568
3390 assert(err == 0);
3391out: 3569out:
3392 bcm43xx_unlock_noirq(bcm); 3570 mutex_unlock(&(bcm)->mutex);
3393 3571
3394 return err; 3572 return err;
3395 3573
3396err_80211_unwind: 3574err_sysfs_unreg:
3397 tasklet_disable(&bcm->isr_tasklet); 3575 bcm43xx_sysfs_unregister(bcm);
3398 /* unwind all 80211 initialization */ 3576err_wlshutdown:
3399 for (i = 0; i < bcm->nr_80211_available; i++) { 3577 bcm43xx_shutdown_all_wireless_cores(bcm);
3400 if (!bcm->core_80211[i].initialized)
3401 continue;
3402 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
3403 bcm43xx_wireless_core_cleanup(bcm);
3404 }
3405err_crystal_off: 3578err_crystal_off:
3406 bcm43xx_pctl_set_crystal(bcm, 0); 3579 bcm43xx_pctl_set_crystal(bcm, 0);
3580err_tasklet:
3581 tasklet_disable(&bcm->isr_tasklet);
3407 goto out; 3582 goto out;
3408} 3583}
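The rewritten bcm43xx_init_board() above replaces the old ad-hoc cleanup (note the deleted FIXME about unchecked bcm43xx_sysfs_register()) with the kernel's standard goto-unwind ladder: each failure label undoes exactly the steps that already succeeded, in reverse order. The idiom in the abstract, with hypothetical step_*/undo_* helpers:

static int mydev_init(struct mydev *dev)
{
	int err;

	err = step_a(dev);
	if (err)
		goto out;		/* nothing to undo yet */
	err = step_b(dev);
	if (err)
		goto err_undo_a;
	err = step_c(dev);
	if (err)
		goto err_undo_b;

	return 0;

err_undo_b:	/* unwind strictly in reverse order of setup */
	undo_b(dev);
err_undo_a:
	undo_a(dev);
out:
	return err;
}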
3409 3584
@@ -3647,7 +3822,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
3647 struct bcm43xx_radioinfo *radio; 3822 struct bcm43xx_radioinfo *radio;
3648 unsigned long flags; 3823 unsigned long flags;
3649 3824
3650 bcm43xx_lock_irqsafe(bcm, flags); 3825 mutex_lock(&bcm->mutex);
3826 spin_lock_irqsave(&bcm->irq_lock, flags);
3651 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { 3827 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
3652 bcm43xx_mac_suspend(bcm); 3828 bcm43xx_mac_suspend(bcm);
3653 bcm43xx_radio_selectchannel(bcm, channel, 0); 3829 bcm43xx_radio_selectchannel(bcm, channel, 0);
@@ -3656,7 +3832,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
3656 radio = bcm43xx_current_radio(bcm); 3832 radio = bcm43xx_current_radio(bcm);
3657 radio->initial_channel = channel; 3833 radio->initial_channel = channel;
3658 } 3834 }
3659 bcm43xx_unlock_irqsafe(bcm, flags); 3835 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3836 mutex_unlock(&bcm->mutex);
3660} 3837}
3661 3838
3662/* set_security() callback in struct ieee80211_device */ 3839/* set_security() callback in struct ieee80211_device */
@@ -3670,7 +3847,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3670 3847
3671 dprintk(KERN_INFO PFX "set security called"); 3848 dprintk(KERN_INFO PFX "set security called");
3672 3849
3673 bcm43xx_lock_irqsafe(bcm, flags); 3850 mutex_lock(&bcm->mutex);
3851 spin_lock_irqsave(&bcm->irq_lock, flags);
3674 3852
3675 for (keyidx = 0; keyidx<WEP_KEYS; keyidx++) 3853 for (keyidx = 0; keyidx<WEP_KEYS; keyidx++)
3676 if (sec->flags & (1<<keyidx)) { 3854 if (sec->flags & (1<<keyidx)) {
@@ -3701,7 +3879,7 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3701 } 3879 }
3702 if (sec->flags & SEC_AUTH_MODE) { 3880 if (sec->flags & SEC_AUTH_MODE) {
3703 secinfo->auth_mode = sec->auth_mode; 3881 secinfo->auth_mode = sec->auth_mode;
3704 dprintk(", .auth_mode = %d\n", sec->auth_mode); 3882 dprintk(", .auth_mode = %d", sec->auth_mode);
3705 } 3883 }
3706 dprintk("\n"); 3884 dprintk("\n");
3707 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED && 3885 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
@@ -3739,7 +3917,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3739 } else 3917 } else
3740 bcm43xx_clear_keys(bcm); 3918 bcm43xx_clear_keys(bcm);
3741 } 3919 }
3742 bcm43xx_unlock_irqsafe(bcm, flags); 3920 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3921 mutex_unlock(&bcm->mutex);
3743} 3922}
3744 3923
3745/* hard_start_xmit() callback in struct ieee80211_device */ 3924/* hard_start_xmit() callback in struct ieee80211_device */
@@ -3751,12 +3930,14 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb,
3751 int err = -ENODEV; 3930 int err = -ENODEV;
3752 unsigned long flags; 3931 unsigned long flags;
3753 3932
3754 bcm43xx_lock_irqonly(bcm, flags); 3933 spin_lock_irqsave(&bcm->irq_lock, flags);
3755 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) 3934 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED))
3756 err = bcm43xx_tx(bcm, txb); 3935 err = bcm43xx_tx(bcm, txb);
3757 bcm43xx_unlock_irqonly(bcm, flags); 3936 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3758 3937
3759 return err; 3938 if (unlikely(err))
3939 return NETDEV_TX_BUSY;
3940 return NETDEV_TX_OK;
3760} 3941}
3761 3942
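The xmit hunk above also stops leaking raw errno values to the network stack: a hard_start_xmit() handler is expected to return NETDEV_TX_OK once it has consumed the packet and NETDEV_TX_BUSY when the stack should requeue it and try again. A sketch of that contract, with hypothetical mydev_* helpers:

static int mydev_hard_start_xmit(struct sk_buff *skb,
				 struct net_device *net_dev)
{
	struct mydev *dev = netdev_priv(net_dev);
	unsigned long flags;
	int err = -ENODEV;

	spin_lock_irqsave(&dev->irq_lock, flags);
	if (likely(mydev_is_initialized(dev)))
		err = mydev_queue_for_tx(dev, skb);
	spin_unlock_irqrestore(&dev->irq_lock, flags);

	/* NETDEV_TX_BUSY asks the stack to requeue the packet, so the
	 * skb must not have been freed on this path. */
	if (unlikely(err))
		return NETDEV_TX_BUSY;
	return NETDEV_TX_OK;
}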
3762static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev) 3943static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev)
@@ -3769,9 +3950,9 @@ static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
3769 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 3950 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
3770 unsigned long flags; 3951 unsigned long flags;
3771 3952
3772 bcm43xx_lock_irqonly(bcm, flags); 3953 spin_lock_irqsave(&bcm->irq_lock, flags);
3773 bcm43xx_controller_restart(bcm, "TX timeout"); 3954 bcm43xx_controller_restart(bcm, "TX timeout");
3774 bcm43xx_unlock_irqonly(bcm, flags); 3955 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3775} 3956}
3776 3957
3777#ifdef CONFIG_NET_POLL_CONTROLLER 3958#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3781,7 +3962,8 @@ static void bcm43xx_net_poll_controller(struct net_device *net_dev)
3781 unsigned long flags; 3962 unsigned long flags;
3782 3963
3783 local_irq_save(flags); 3964 local_irq_save(flags);
3784 bcm43xx_interrupt_handler(bcm->irq, bcm, NULL); 3965 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
3966 bcm43xx_interrupt_handler(bcm->irq, bcm, NULL);
3785 local_irq_restore(flags); 3967 local_irq_restore(flags);
3786} 3968}
3787#endif /* CONFIG_NET_POLL_CONTROLLER */ 3969#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -3799,9 +3981,10 @@ static int bcm43xx_net_stop(struct net_device *net_dev)
3799 int err; 3981 int err;
3800 3982
3801 ieee80211softmac_stop(net_dev); 3983 ieee80211softmac_stop(net_dev);
3802 err = bcm43xx_disable_interrupts_sync(bcm, NULL); 3984 err = bcm43xx_disable_interrupts_sync(bcm);
3803 assert(!err); 3985 assert(!err);
3804 bcm43xx_free_board(bcm); 3986 bcm43xx_free_board(bcm);
3987 flush_scheduled_work();
3805 3988
3806 return 0; 3989 return 0;
3807} 3990}
@@ -3818,10 +4001,12 @@ static int bcm43xx_init_private(struct bcm43xx_private *bcm,
3818 bcm->softmac->set_channel = bcm43xx_ieee80211_set_chan; 4001 bcm->softmac->set_channel = bcm43xx_ieee80211_set_chan;
3819 4002
3820 bcm->irq_savedstate = BCM43xx_IRQ_INITIAL; 4003 bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
4004 bcm->mac_suspended = 1;
3821 bcm->pci_dev = pci_dev; 4005 bcm->pci_dev = pci_dev;
3822 bcm->net_dev = net_dev; 4006 bcm->net_dev = net_dev;
3823 bcm->bad_frames_preempt = modparam_bad_frames_preempt; 4007 bcm->bad_frames_preempt = modparam_bad_frames_preempt;
3824 spin_lock_init(&bcm->irq_lock); 4008 spin_lock_init(&bcm->irq_lock);
4009 spin_lock_init(&bcm->leds_lock);
3825 mutex_init(&bcm->mutex); 4010 mutex_init(&bcm->mutex);
3826 tasklet_init(&bcm->isr_tasklet, 4011 tasklet_init(&bcm->isr_tasklet,
3827 (void (*)(unsigned long))bcm43xx_interrupt_tasklet, 4012 (void (*)(unsigned long))bcm43xx_interrupt_tasklet,
@@ -3940,7 +4125,6 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev)
3940 bcm43xx_debugfs_remove_device(bcm); 4125 bcm43xx_debugfs_remove_device(bcm);
3941 unregister_netdev(net_dev); 4126 unregister_netdev(net_dev);
3942 bcm43xx_detach_board(bcm); 4127 bcm43xx_detach_board(bcm);
3943 assert(bcm->ucode == NULL);
3944 free_ieee80211softmac(net_dev); 4128 free_ieee80211softmac(net_dev);
3945} 4129}
3946 4130
@@ -3950,47 +4134,31 @@ static void __devexit bcm43xx_remove_one(struct pci_dev *pdev)
3950static void bcm43xx_chip_reset(void *_bcm) 4134static void bcm43xx_chip_reset(void *_bcm)
3951{ 4135{
3952 struct bcm43xx_private *bcm = _bcm; 4136 struct bcm43xx_private *bcm = _bcm;
3953 struct net_device *net_dev = bcm->net_dev; 4137 struct bcm43xx_phyinfo *phy;
3954 struct pci_dev *pci_dev = bcm->pci_dev; 4138 int err = -ENODEV;
3955 int err;
3956 int was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
3957
3958 netif_stop_queue(bcm->net_dev);
3959 tasklet_disable(&bcm->isr_tasklet);
3960 4139
3961 bcm->firmware_norelease = 1; 4140 mutex_lock(&(bcm)->mutex);
3962 if (was_initialized) 4141 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
3963 bcm43xx_free_board(bcm); 4142 bcm43xx_periodic_tasks_delete(bcm);
3964 bcm->firmware_norelease = 0; 4143 phy = bcm43xx_current_phy(bcm);
3965 bcm43xx_detach_board(bcm); 4144 err = bcm43xx_select_wireless_core(bcm, phy->type);
3966 err = bcm43xx_init_private(bcm, net_dev, pci_dev); 4145 if (!err)
3967 if (err) 4146 bcm43xx_periodic_tasks_setup(bcm);
3968 goto failure;
3969 err = bcm43xx_attach_board(bcm);
3970 if (err)
3971 goto failure;
3972 if (was_initialized) {
3973 err = bcm43xx_init_board(bcm);
3974 if (err)
3975 goto failure;
3976 } 4147 }
3977 netif_wake_queue(bcm->net_dev); 4148 mutex_unlock(&(bcm)->mutex);
3978 printk(KERN_INFO PFX "Controller restarted\n");
3979 4149
3980 return; 4150 printk(KERN_ERR PFX "Controller restart%s\n",
3981failure: 4151 (err == 0) ? "ed" : " failed");
3982 printk(KERN_ERR PFX "Controller restart failed\n");
3983} 4152}
3984 4153
3985/* Hard-reset the chip. 4154/* Hard-reset the chip.
3986 * This can be called from interrupt or process context. 4155 * This can be called from interrupt or process context.
3987 * Make sure to _not_ re-enable device interrupts after this has been called. 4156 * bcm->irq_lock must be locked.
3988*/ 4157 */
3989void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason) 4158void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason)
3990{ 4159{
3991 bcm43xx_set_status(bcm, BCM43xx_STAT_RESTARTING); 4160 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
3992 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); 4161 return;
3993 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */
3994 printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason); 4162 printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
3995 INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm); 4163 INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset, bcm);
3996 schedule_work(&bcm->restart_work); 4164 schedule_work(&bcm->restart_work);
@@ -4002,21 +4170,16 @@ static int bcm43xx_suspend(struct pci_dev *pdev, pm_message_t state)
4002{ 4170{
4003 struct net_device *net_dev = pci_get_drvdata(pdev); 4171 struct net_device *net_dev = pci_get_drvdata(pdev);
4004 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 4172 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
4005 unsigned long flags; 4173 int err;
4006 int try_to_shutdown = 0, err;
4007 4174
4008 dprintk(KERN_INFO PFX "Suspending...\n"); 4175 dprintk(KERN_INFO PFX "Suspending...\n");
4009 4176
4010 bcm43xx_lock_irqsafe(bcm, flags);
4011 bcm->was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
4012 if (bcm->was_initialized)
4013 try_to_shutdown = 1;
4014 bcm43xx_unlock_irqsafe(bcm, flags);
4015
4016 netif_device_detach(net_dev); 4177 netif_device_detach(net_dev);
4017 if (try_to_shutdown) { 4178 bcm->was_initialized = 0;
4179 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
4180 bcm->was_initialized = 1;
4018 ieee80211softmac_stop(net_dev); 4181 ieee80211softmac_stop(net_dev);
4019 err = bcm43xx_disable_interrupts_sync(bcm, &bcm->irq_savedstate); 4182 err = bcm43xx_disable_interrupts_sync(bcm);
4020 if (unlikely(err)) { 4183 if (unlikely(err)) {
4021 dprintk(KERN_ERR PFX "Suspend failed.\n"); 4184 dprintk(KERN_ERR PFX "Suspend failed.\n");
4022 return -EAGAIN; 4185 return -EAGAIN;
@@ -4049,17 +4212,14 @@ static int bcm43xx_resume(struct pci_dev *pdev)
4049 pci_restore_state(pdev); 4212 pci_restore_state(pdev);
4050 4213
4051 bcm43xx_chipset_attach(bcm); 4214 bcm43xx_chipset_attach(bcm);
4052 if (bcm->was_initialized) { 4215 if (bcm->was_initialized)
4053 bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
4054 err = bcm43xx_init_board(bcm); 4216 err = bcm43xx_init_board(bcm);
4055 }
4056 if (err) { 4217 if (err) {
4057 printk(KERN_ERR PFX "Resume failed!\n"); 4218 printk(KERN_ERR PFX "Resume failed!\n");
4058 return err; 4219 return err;
4059 } 4220 }
4060
4061 netif_device_attach(net_dev); 4221 netif_device_attach(net_dev);
4062 4222
4063 dprintk(KERN_INFO PFX "Device resumed.\n"); 4223 dprintk(KERN_INFO PFX "Device resumed.\n");
4064 4224
4065 return 0; 4225 return 0;
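The suspend/resume pair now records whether the board was up in bcm->was_initialized and re-initializes from scratch on resume, instead of trying to restore saved IRQ state. The overall PCI power-management shape, reduced to a sketch (mydev_* helpers are hypothetical and error handling is abbreviated):

static int mydev_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *net_dev = pci_get_drvdata(pdev);
	struct mydev *dev = netdev_priv(net_dev);

	netif_device_detach(net_dev);	/* stop new TX from the stack */
	dev->was_initialized = mydev_is_initialized(dev);
	if (dev->was_initialized)
		mydev_shutdown_hw(dev);	/* quiesce IRQs, free the board */

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int mydev_resume(struct pci_dev *pdev)
{
	struct net_device *net_dev = pci_get_drvdata(pdev);
	struct mydev *dev = netdev_priv(net_dev);
	int err = 0;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (dev->was_initialized)
		err = mydev_init_board(dev);	/* full re-init, no saved state */
	if (err)
		return err;
	netif_device_attach(net_dev);

	return 0;
}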
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
index 116493671f88..f76357178e4d 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
@@ -133,11 +133,17 @@ void bcm43xx_dummy_transmission(struct bcm43xx_private *bcm);
133 133
134int bcm43xx_switch_core(struct bcm43xx_private *bcm, struct bcm43xx_coreinfo *new_core); 134int bcm43xx_switch_core(struct bcm43xx_private *bcm, struct bcm43xx_coreinfo *new_core);
135 135
136int bcm43xx_select_wireless_core(struct bcm43xx_private *bcm,
137 int phytype);
138
136void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy); 139void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy);
137 140
138void bcm43xx_mac_suspend(struct bcm43xx_private *bcm); 141void bcm43xx_mac_suspend(struct bcm43xx_private *bcm);
139void bcm43xx_mac_enable(struct bcm43xx_private *bcm); 142void bcm43xx_mac_enable(struct bcm43xx_private *bcm);
140 143
144void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm);
145void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm);
146
141void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason); 147void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason);
142 148
143int bcm43xx_sprom_read(struct bcm43xx_private *bcm, u16 *sprom); 149int bcm43xx_sprom_read(struct bcm43xx_private *bcm, u16 *sprom);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index f8200deecc8a..eafd0f662686 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -81,6 +81,16 @@ static const s8 bcm43xx_tssi2dbm_g_table[] = {
81static void bcm43xx_phy_initg(struct bcm43xx_private *bcm); 81static void bcm43xx_phy_initg(struct bcm43xx_private *bcm);
82 82
83 83
84static inline
85void bcm43xx_voluntary_preempt(void)
86{
87 assert(!in_atomic() && !in_irq() &&
88 !in_interrupt() && !irqs_disabled());
89#ifndef CONFIG_PREEMPT
90 cond_resched();
91#endif /* CONFIG_PREEMPT */
92}
93
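bcm43xx_voluntary_preempt() above makes the long busy-waiting calibration loops scheduler-friendly: on a kernel without CONFIG_PREEMPT, cond_resched() is the only point where such a loop yields the CPU, while a preemptible kernel can interrupt it anyway, so the call compiles away. A hypothetical caller; the register numbers and loop bound are illustrative only:

	u32 sum = 0;
	int i;

	for (i = 0; i < 10; i++) {
		mydev_phy_write(dev, 0x0015, 0xAFA0);	/* kick a measurement */
		udelay(40);				/* busy-wait for the PHY */
		sum += mydev_phy_read(dev, 0x002C);
		bcm43xx_voluntary_preempt();		/* let other tasks run */
	}

The assert() in the helper documents the requirement: callers must be in sleepable process context with interrupts enabled.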
84void bcm43xx_raw_phy_lock(struct bcm43xx_private *bcm) 94void bcm43xx_raw_phy_lock(struct bcm43xx_private *bcm)
85{ 95{
86 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 96 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
@@ -133,22 +143,14 @@ void bcm43xx_phy_write(struct bcm43xx_private *bcm, u16 offset, u16 val)
133void bcm43xx_phy_calibrate(struct bcm43xx_private *bcm) 143void bcm43xx_phy_calibrate(struct bcm43xx_private *bcm)
134{ 144{
135 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 145 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
136 unsigned long flags;
137 146
138 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* Dummy read. */ 147 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* Dummy read. */
139 if (phy->calibrated) 148 if (phy->calibrated)
140 return; 149 return;
141 if (phy->type == BCM43xx_PHYTYPE_G && phy->rev == 1) { 150 if (phy->type == BCM43xx_PHYTYPE_G && phy->rev == 1) {
142 /* We do not want to be preempted while calibrating
143 * the hardware.
144 */
145 local_irq_save(flags);
146
147 bcm43xx_wireless_core_reset(bcm, 0); 151 bcm43xx_wireless_core_reset(bcm, 0);
148 bcm43xx_phy_initg(bcm); 152 bcm43xx_phy_initg(bcm);
149 bcm43xx_wireless_core_reset(bcm, 1); 153 bcm43xx_wireless_core_reset(bcm, 1);
150
151 local_irq_restore(flags);
152 } 154 }
153 phy->calibrated = 1; 155 phy->calibrated = 1;
154} 156}
@@ -1299,7 +1301,9 @@ static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm)
1299{ 1301{
1300 int i; 1302 int i;
1301 u16 ret = 0; 1303 u16 ret = 0;
1304 unsigned long flags;
1302 1305
1306 local_irq_save(flags);
1303 for (i = 0; i < 10; i++){ 1307 for (i = 0; i < 10; i++){
1304 bcm43xx_phy_write(bcm, 0x0015, 0xAFA0); 1308 bcm43xx_phy_write(bcm, 0x0015, 0xAFA0);
1305 udelay(1); 1309 udelay(1);
@@ -1309,6 +1313,8 @@ static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm)
1309 udelay(40); 1313 udelay(40);
1310 ret += bcm43xx_phy_read(bcm, 0x002C); 1314 ret += bcm43xx_phy_read(bcm, 0x002C);
1311 } 1315 }
1316 local_irq_restore(flags);
1317 bcm43xx_voluntary_preempt();
1312 1318
1313 return ret; 1319 return ret;
1314} 1320}
@@ -1435,6 +1441,7 @@ u16 bcm43xx_phy_lo_g_deviation_subval(struct bcm43xx_private *bcm, u16 control)
1435 } 1441 }
1436 ret = bcm43xx_phy_read(bcm, 0x002D); 1442 ret = bcm43xx_phy_read(bcm, 0x002D);
1437 local_irq_restore(flags); 1443 local_irq_restore(flags);
1444 bcm43xx_voluntary_preempt();
1438 1445
1439 return ret; 1446 return ret;
1440} 1447}
@@ -1760,6 +1767,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
1760 bcm43xx_radio_write16(bcm, 0x43, i); 1767 bcm43xx_radio_write16(bcm, 0x43, i);
1761 bcm43xx_radio_write16(bcm, 0x52, radio->txctl2); 1768 bcm43xx_radio_write16(bcm, 0x52, radio->txctl2);
1762 udelay(10); 1769 udelay(10);
1770 bcm43xx_voluntary_preempt();
1763 1771
1764 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2); 1772 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2);
1765 1773
@@ -1803,6 +1811,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
1803 radio->txctl2 1811 radio->txctl2
1804 | (3/*txctl1*/ << 4));//FIXME: shouldn't txctl1 be zero here and 3 in the loop above? 1812 | (3/*txctl1*/ << 4));//FIXME: shouldn't txctl1 be zero here and 3 in the loop above?
1805 udelay(10); 1813 udelay(10);
1814 bcm43xx_voluntary_preempt();
1806 1815
1807 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2); 1816 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2);
1808 1817
@@ -1824,6 +1833,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
1824 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA2); 1833 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA2);
1825 udelay(2); 1834 udelay(2);
1826 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA3); 1835 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA3);
1836 bcm43xx_voluntary_preempt();
1827 } else 1837 } else
1828 bcm43xx_phy_write(bcm, 0x0015, r27 | 0xEFA0); 1838 bcm43xx_phy_write(bcm, 0x0015, r27 | 0xEFA0);
1829 bcm43xx_phy_lo_adjust(bcm, is_initializing); 1839 bcm43xx_phy_lo_adjust(bcm, is_initializing);
@@ -2188,12 +2198,6 @@ int bcm43xx_phy_init(struct bcm43xx_private *bcm)
2188{ 2198{
2189 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 2199 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2190 int err = -ENODEV; 2200 int err = -ENODEV;
2191 unsigned long flags;
2192
2193 /* We do not want to be preempted while calibrating
2194 * the hardware.
2195 */
2196 local_irq_save(flags);
2197 2201
2198 switch (phy->type) { 2202 switch (phy->type) {
2199 case BCM43xx_PHYTYPE_A: 2203 case BCM43xx_PHYTYPE_A:
@@ -2227,7 +2231,6 @@ int bcm43xx_phy_init(struct bcm43xx_private *bcm)
2227 err = 0; 2231 err = 0;
2228 break; 2232 break;
2229 } 2233 }
2230 local_irq_restore(flags);
2231 if (err) 2234 if (err)
2232 printk(KERN_WARNING PFX "Unknown PHYTYPE found!\n"); 2235 printk(KERN_WARNING PFX "Unknown PHYTYPE found!\n");
2233 2236
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
index 574085c46152..c60c1743ea06 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
@@ -262,7 +262,7 @@ static void tx_tasklet(unsigned long d)
262 int err; 262 int err;
263 u16 txctl; 263 u16 txctl;
264 264
265 bcm43xx_lock_irqonly(bcm, flags); 265 spin_lock_irqsave(&bcm->irq_lock, flags);
266 266
267 if (queue->tx_frozen) 267 if (queue->tx_frozen)
268 goto out_unlock; 268 goto out_unlock;
@@ -300,7 +300,7 @@ static void tx_tasklet(unsigned long d)
300 continue; 300 continue;
301 } 301 }
302out_unlock: 302out_unlock:
303 bcm43xx_unlock_irqonly(bcm, flags); 303 spin_unlock_irqrestore(&bcm->irq_lock, flags);
304} 304}
305 305
306static void setup_txqueues(struct bcm43xx_pioqueue *queue) 306static void setup_txqueues(struct bcm43xx_pioqueue *queue)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
index 6a23bdc75412..c71b998a3694 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
@@ -120,12 +120,14 @@ static ssize_t bcm43xx_attr_sprom_show(struct device *dev,
120 GFP_KERNEL); 120 GFP_KERNEL);
121 if (!sprom) 121 if (!sprom)
122 return -ENOMEM; 122 return -ENOMEM;
123 bcm43xx_lock_irqsafe(bcm, flags); 123 mutex_lock(&bcm->mutex);
124 spin_lock_irqsave(&bcm->irq_lock, flags);
124 err = bcm43xx_sprom_read(bcm, sprom); 125 err = bcm43xx_sprom_read(bcm, sprom);
125 if (!err) 126 if (!err)
126 err = sprom2hex(sprom, buf, PAGE_SIZE); 127 err = sprom2hex(sprom, buf, PAGE_SIZE);
127 mmiowb(); 128 mmiowb();
128 bcm43xx_unlock_irqsafe(bcm, flags); 129 spin_unlock_irqrestore(&bcm->irq_lock, flags);
130 mutex_unlock(&bcm->mutex);
129 kfree(sprom); 131 kfree(sprom);
130 132
131 return err; 133 return err;
@@ -150,10 +152,14 @@ static ssize_t bcm43xx_attr_sprom_store(struct device *dev,
150 err = hex2sprom(sprom, buf, count); 152 err = hex2sprom(sprom, buf, count);
151 if (err) 153 if (err)
152 goto out_kfree; 154 goto out_kfree;
153 bcm43xx_lock_irqsafe(bcm, flags); 155 mutex_lock(&bcm->mutex);
156 spin_lock_irqsave(&bcm->irq_lock, flags);
157 spin_lock(&bcm->leds_lock);
154 err = bcm43xx_sprom_write(bcm, sprom); 158 err = bcm43xx_sprom_write(bcm, sprom);
155 mmiowb(); 159 mmiowb();
156 bcm43xx_unlock_irqsafe(bcm, flags); 160 spin_unlock(&bcm->leds_lock);
161 spin_unlock_irqrestore(&bcm->irq_lock, flags);
162 mutex_unlock(&bcm->mutex);
157out_kfree: 163out_kfree:
158 kfree(sprom); 164 kfree(sprom);
159 165
@@ -170,13 +176,12 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
170 char *buf) 176 char *buf)
171{ 177{
172 struct bcm43xx_private *bcm = dev_to_bcm(dev); 178 struct bcm43xx_private *bcm = dev_to_bcm(dev);
173 int err;
174 ssize_t count = 0; 179 ssize_t count = 0;
175 180
176 if (!capable(CAP_NET_ADMIN)) 181 if (!capable(CAP_NET_ADMIN))
177 return -EPERM; 182 return -EPERM;
178 183
179 bcm43xx_lock_noirq(bcm); 184 mutex_lock(&bcm->mutex);
180 185
181 switch (bcm43xx_current_radio(bcm)->interfmode) { 186 switch (bcm43xx_current_radio(bcm)->interfmode) {
182 case BCM43xx_RADIO_INTERFMODE_NONE: 187 case BCM43xx_RADIO_INTERFMODE_NONE:
@@ -191,11 +196,10 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
191 default: 196 default:
192 assert(0); 197 assert(0);
193 } 198 }
194 err = 0;
195 199
196 bcm43xx_unlock_noirq(bcm); 200 mutex_unlock(&bcm->mutex);
197 201
198 return err ? err : count; 202 return count;
199 203
200} 204}
201 205
@@ -229,7 +233,8 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
229 return -EINVAL; 233 return -EINVAL;
230 } 234 }
231 235
232 bcm43xx_lock_irqsafe(bcm, flags); 236 mutex_lock(&bcm->mutex);
237 spin_lock_irqsave(&bcm->irq_lock, flags);
233 238
234 err = bcm43xx_radio_set_interference_mitigation(bcm, mode); 239 err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
235 if (err) { 240 if (err) {
@@ -237,7 +242,8 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
237 "supported by device\n"); 242 "supported by device\n");
238 } 243 }
239 mmiowb(); 244 mmiowb();
240 bcm43xx_unlock_irqsafe(bcm, flags); 245 spin_unlock_irqrestore(&bcm->irq_lock, flags);
246 mutex_unlock(&bcm->mutex);
241 247
242 return err ? err : count; 248 return err ? err : count;
243} 249}
@@ -251,23 +257,21 @@ static ssize_t bcm43xx_attr_preamble_show(struct device *dev,
251 char *buf) 257 char *buf)
252{ 258{
253 struct bcm43xx_private *bcm = dev_to_bcm(dev); 259 struct bcm43xx_private *bcm = dev_to_bcm(dev);
254 int err;
255 ssize_t count; 260 ssize_t count;
256 261
257 if (!capable(CAP_NET_ADMIN)) 262 if (!capable(CAP_NET_ADMIN))
258 return -EPERM; 263 return -EPERM;
259 264
260 bcm43xx_lock_noirq(bcm); 265 mutex_lock(&bcm->mutex);
261 266
262 if (bcm->short_preamble) 267 if (bcm->short_preamble)
263 count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n"); 268 count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n");
264 else 269 else
265 count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n"); 270 count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n");
266 271
267 err = 0; 272 mutex_unlock(&bcm->mutex);
268 bcm43xx_unlock_noirq(bcm);
269 273
270 return err ? err : count; 274 return count;
271} 275}
272 276
273static ssize_t bcm43xx_attr_preamble_store(struct device *dev, 277static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
@@ -276,7 +280,6 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
276{ 280{
277 struct bcm43xx_private *bcm = dev_to_bcm(dev); 281 struct bcm43xx_private *bcm = dev_to_bcm(dev);
278 unsigned long flags; 282 unsigned long flags;
279 int err;
280 int value; 283 int value;
281 284
282 if (!capable(CAP_NET_ADMIN)) 285 if (!capable(CAP_NET_ADMIN))
@@ -285,20 +288,141 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
285 value = get_boolean(buf, count); 288 value = get_boolean(buf, count);
286 if (value < 0) 289 if (value < 0)
287 return value; 290 return value;
288 bcm43xx_lock_irqsafe(bcm, flags); 291 mutex_lock(&bcm->mutex);
292 spin_lock_irqsave(&bcm->irq_lock, flags);
289 293
290 bcm->short_preamble = !!value; 294 bcm->short_preamble = !!value;
291 295
292 err = 0; 296 spin_unlock_irqrestore(&bcm->irq_lock, flags);
293 bcm43xx_unlock_irqsafe(bcm, flags); 297 mutex_unlock(&bcm->mutex);
294 298
295 return err ? err : count; 299 return count;
296} 300}
297 301
298static DEVICE_ATTR(shortpreamble, 0644, 302static DEVICE_ATTR(shortpreamble, 0644,
299 bcm43xx_attr_preamble_show, 303 bcm43xx_attr_preamble_show,
300 bcm43xx_attr_preamble_store); 304 bcm43xx_attr_preamble_store);
301 305
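The two attributes added below (phymode and microcodestatus) follow the stock sysfs pattern: DEVICE_ATTR(name, mode, show, store) emits a dev_attr_<name> object that bcm43xx_sysfs_register() later wires up with device_create_file(). The mechanics in isolation, using a hypothetical attribute:

#include <linux/device.h>

static ssize_t mydev_attr_foo_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	/* sysfs hands us a PAGE_SIZE buffer; return the bytes written */
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}

static ssize_t mydev_attr_foo_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	/* parse buf here; return count on success, -errno on failure */
	return count;
}

/* Expands to: struct device_attribute dev_attr_foo = ... */
static DEVICE_ATTR(foo, 0644,
		   mydev_attr_foo_show,
		   mydev_attr_foo_store);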
306static ssize_t bcm43xx_attr_phymode_store(struct device *dev,
307 struct device_attribute *attr,
308 const char *buf, size_t count)
309{
310 struct bcm43xx_private *bcm = dev_to_bcm(dev);
311 int phytype;
312 int err = -EINVAL;
313
314 if (count < 1)
315 goto out;
316 switch (buf[0]) {
317 case 'a': case 'A':
318 phytype = BCM43xx_PHYTYPE_A;
319 break;
320 case 'b': case 'B':
321 phytype = BCM43xx_PHYTYPE_B;
322 break;
323 case 'g': case 'G':
324 phytype = BCM43xx_PHYTYPE_G;
325 break;
326 default:
327 goto out;
328 }
329
330 bcm43xx_periodic_tasks_delete(bcm);
331 mutex_lock(&(bcm)->mutex);
332 err = bcm43xx_select_wireless_core(bcm, phytype);
333 if (!err)
334 bcm43xx_periodic_tasks_setup(bcm);
335 mutex_unlock(&(bcm)->mutex);
336 if (err == -ESRCH)
337 err = -ENODEV;
338
339out:
340 return err ? err : count;
341}
342
343static ssize_t bcm43xx_attr_phymode_show(struct device *dev,
344 struct device_attribute *attr,
345 char *buf)
346{
347 struct bcm43xx_private *bcm = dev_to_bcm(dev);
348 ssize_t count = 0;
349
350 mutex_lock(&(bcm)->mutex);
351 switch (bcm43xx_current_phy(bcm)->type) {
352 case BCM43xx_PHYTYPE_A:
353 count = snprintf(buf, PAGE_SIZE, "A");
354 break;
355 case BCM43xx_PHYTYPE_B:
356 count = snprintf(buf, PAGE_SIZE, "B");
357 break;
358 case BCM43xx_PHYTYPE_G:
359 count = snprintf(buf, PAGE_SIZE, "G");
360 break;
361 default:
362 assert(0);
363 }
364 mutex_unlock(&(bcm)->mutex);
365
366 return count;
367}
368
369static DEVICE_ATTR(phymode, 0644,
370 bcm43xx_attr_phymode_show,
371 bcm43xx_attr_phymode_store);
372
373static ssize_t bcm43xx_attr_microcode_show(struct device *dev,
374 struct device_attribute *attr,
375 char *buf)
376{
377 unsigned long flags;
378 struct bcm43xx_private *bcm = dev_to_bcm(dev);
379 ssize_t count = 0;
380 u16 status;
381
382 if (!capable(CAP_NET_ADMIN))
383 return -EPERM;
384
385 mutex_lock(&(bcm)->mutex);
386 spin_lock_irqsave(&bcm->irq_lock, flags);
387 status = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
388 BCM43xx_UCODE_STATUS);
389
390 spin_unlock_irqrestore(&bcm->irq_lock, flags);
391 mutex_unlock(&(bcm)->mutex);
392 switch (status) {
393 case 0x0000:
394 count = snprintf(buf, PAGE_SIZE, "0x%.4x (invalid)\n",
395 status);
396 break;
397 case 0x0001:
398 count = snprintf(buf, PAGE_SIZE, "0x%.4x (init)\n",
399 status);
400 break;
401 case 0x0002:
402 count = snprintf(buf, PAGE_SIZE, "0x%.4x (active)\n",
403 status);
404 break;
405 case 0x0003:
406 count = snprintf(buf, PAGE_SIZE, "0x%.4x (suspended)\n",
407 status);
408 break;
409 case 0x0004:
410 count = snprintf(buf, PAGE_SIZE, "0x%.4x (asleep)\n",
411 status);
412 break;
413 default:
414 count = snprintf(buf, PAGE_SIZE, "0x%.4x (unknown)\n",
415 status);
416 break;
417 }
418
419 return count;
420}
421
422static DEVICE_ATTR(microcodestatus, 0444,
423 bcm43xx_attr_microcode_show,
424 NULL);
425
302int bcm43xx_sysfs_register(struct bcm43xx_private *bcm) 426int bcm43xx_sysfs_register(struct bcm43xx_private *bcm)
303{ 427{
304 struct device *dev = &bcm->pci_dev->dev; 428 struct device *dev = &bcm->pci_dev->dev;
@@ -315,9 +439,19 @@ int bcm43xx_sysfs_register(struct bcm43xx_private *bcm)
315 err = device_create_file(dev, &dev_attr_shortpreamble); 439 err = device_create_file(dev, &dev_attr_shortpreamble);
316 if (err) 440 if (err)
317 goto err_remove_interfmode; 441 goto err_remove_interfmode;
442 err = device_create_file(dev, &dev_attr_phymode);
443 if (err)
444 goto err_remove_shortpreamble;
445 err = device_create_file(dev, &dev_attr_microcodestatus);
446 if (err)
447 goto err_remove_phymode;
318 448
319out: 449out:
320 return err; 450 return err;
451err_remove_phymode:
452 device_remove_file(dev, &dev_attr_phymode);
453err_remove_shortpreamble:
454 device_remove_file(dev, &dev_attr_shortpreamble);
321err_remove_interfmode: 455err_remove_interfmode:
322 device_remove_file(dev, &dev_attr_interference); 456 device_remove_file(dev, &dev_attr_interference);
323err_remove_sprom: 457err_remove_sprom:
@@ -329,6 +463,8 @@ void bcm43xx_sysfs_unregister(struct bcm43xx_private *bcm)
329{ 463{
330 struct device *dev = &bcm->pci_dev->dev; 464 struct device *dev = &bcm->pci_dev->dev;
331 465
466 device_remove_file(dev, &dev_attr_microcodestatus);
467 device_remove_file(dev, &dev_attr_phymode);
332 device_remove_file(dev, &dev_attr_shortpreamble); 468 device_remove_file(dev, &dev_attr_shortpreamble);
333 device_remove_file(dev, &dev_attr_interference); 469 device_remove_file(dev, &dev_attr_interference);
334 device_remove_file(dev, &dev_attr_sprom); 470 device_remove_file(dev, &dev_attr_sprom);
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 5c36e29efff7..888077fc14c4 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -47,6 +47,8 @@
47#define BCM43xx_WX_VERSION 18 47#define BCM43xx_WX_VERSION 18
48 48
49#define MAX_WX_STRING 80 49#define MAX_WX_STRING 80
50/* FIXME: the next line is a guess as to what the maximum RSSI value might be */
51#define RX_RSSI_MAX 60
50 52
51 53
52static int bcm43xx_wx_get_name(struct net_device *net_dev, 54static int bcm43xx_wx_get_name(struct net_device *net_dev,
@@ -56,12 +58,11 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev,
56{ 58{
57 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 59 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
58 int i; 60 int i;
59 unsigned long flags;
60 struct bcm43xx_phyinfo *phy; 61 struct bcm43xx_phyinfo *phy;
61 char suffix[7] = { 0 }; 62 char suffix[7] = { 0 };
62 int have_a = 0, have_b = 0, have_g = 0; 63 int have_a = 0, have_b = 0, have_g = 0;
63 64
64 bcm43xx_lock_irqsafe(bcm, flags); 65 mutex_lock(&bcm->mutex);
65 for (i = 0; i < bcm->nr_80211_available; i++) { 66 for (i = 0; i < bcm->nr_80211_available; i++) {
66 phy = &(bcm->core_80211_ext[i].phy); 67 phy = &(bcm->core_80211_ext[i].phy);
67 switch (phy->type) { 68 switch (phy->type) {
@@ -77,7 +78,7 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev,
77 assert(0); 78 assert(0);
78 } 79 }
79 } 80 }
80 bcm43xx_unlock_irqsafe(bcm, flags); 81 mutex_unlock(&bcm->mutex);
81 82
82 i = 0; 83 i = 0;
83 if (have_a) { 84 if (have_a) {
@@ -111,7 +112,9 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
111 int freq; 112 int freq;
112 int err = -EINVAL; 113 int err = -EINVAL;
113 114
114 bcm43xx_lock_irqsafe(bcm, flags); 115 mutex_lock(&bcm->mutex);
116 spin_lock_irqsave(&bcm->irq_lock, flags);
117
115 if ((data->freq.m >= 0) && (data->freq.m <= 1000)) { 118 if ((data->freq.m >= 0) && (data->freq.m <= 1000)) {
116 channel = data->freq.m; 119 channel = data->freq.m;
117 freq = bcm43xx_channel_to_freq(bcm, channel); 120 freq = bcm43xx_channel_to_freq(bcm, channel);
@@ -131,7 +134,8 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
131 err = 0; 134 err = 0;
132 } 135 }
133out_unlock: 136out_unlock:
134 bcm43xx_unlock_irqsafe(bcm, flags); 137 spin_unlock_irqrestore(&bcm->irq_lock, flags);
138 mutex_unlock(&bcm->mutex);
135 139
136 return err; 140 return err;
137} 141}
@@ -143,11 +147,10 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
143{ 147{
144 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 148 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
145 struct bcm43xx_radioinfo *radio; 149 struct bcm43xx_radioinfo *radio;
146 unsigned long flags;
147 int err = -ENODEV; 150 int err = -ENODEV;
148 u16 channel; 151 u16 channel;
149 152
150 bcm43xx_lock_irqsafe(bcm, flags); 153 mutex_lock(&bcm->mutex);
151 radio = bcm43xx_current_radio(bcm); 154 radio = bcm43xx_current_radio(bcm);
152 channel = radio->channel; 155 channel = radio->channel;
153 if (channel == 0xFF) { 156 if (channel == 0xFF) {
@@ -162,7 +165,7 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
162 165
163 err = 0; 166 err = 0;
164out_unlock: 167out_unlock:
165 bcm43xx_unlock_irqsafe(bcm, flags); 168 mutex_unlock(&bcm->mutex);
166 169
167 return err; 170 return err;
168} 171}
@@ -180,13 +183,15 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
180 if (mode == IW_MODE_AUTO) 183 if (mode == IW_MODE_AUTO)
181 mode = BCM43xx_INITIAL_IWMODE; 184 mode = BCM43xx_INITIAL_IWMODE;
182 185
183 bcm43xx_lock_irqsafe(bcm, flags); 186 mutex_lock(&bcm->mutex);
187 spin_lock_irqsave(&bcm->irq_lock, flags);
184 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { 188 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
185 if (bcm->ieee->iw_mode != mode) 189 if (bcm->ieee->iw_mode != mode)
186 bcm43xx_set_iwmode(bcm, mode); 190 bcm43xx_set_iwmode(bcm, mode);
187 } else 191 } else
188 bcm->ieee->iw_mode = mode; 192 bcm->ieee->iw_mode = mode;
189 bcm43xx_unlock_irqsafe(bcm, flags); 193 spin_unlock_irqrestore(&bcm->irq_lock, flags);
194 mutex_unlock(&bcm->mutex);
190 195
191 return 0; 196 return 0;
192} 197}
@@ -197,11 +202,10 @@ static int bcm43xx_wx_get_mode(struct net_device *net_dev,
197 char *extra) 202 char *extra)
198{ 203{
199 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 204 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
200 unsigned long flags;
201 205
202 bcm43xx_lock_irqsafe(bcm, flags); 206 mutex_lock(&bcm->mutex);
203 data->mode = bcm->ieee->iw_mode; 207 data->mode = bcm->ieee->iw_mode;
204 bcm43xx_unlock_irqsafe(bcm, flags); 208 mutex_unlock(&bcm->mutex);
205 209
206 return 0; 210 return 0;
207} 211}
@@ -214,7 +218,6 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
214 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 218 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
215 struct iw_range *range = (struct iw_range *)extra; 219 struct iw_range *range = (struct iw_range *)extra;
216 const struct ieee80211_geo *geo; 220 const struct ieee80211_geo *geo;
217 unsigned long flags;
218 int i, j; 221 int i, j;
219 struct bcm43xx_phyinfo *phy; 222 struct bcm43xx_phyinfo *phy;
220 223
@@ -226,15 +229,14 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
226 range->throughput = 27 * 1000 * 1000; 229 range->throughput = 27 * 1000 * 1000;
227 230
228 range->max_qual.qual = 100; 231 range->max_qual.qual = 100;
229 /* TODO: Real max RSSI */ 232 range->max_qual.level = 146; /* set floor at -110 dBm (146 - 256) */
230 range->max_qual.level = 3; 233 range->max_qual.noise = 146;
231 range->max_qual.noise = 100; 234 range->max_qual.updated = IW_QUAL_ALL_UPDATED;
232 range->max_qual.updated = 7;
233 235
234 range->avg_qual.qual = 70; 236 range->avg_qual.qual = 50;
235 range->avg_qual.level = 2; 237 range->avg_qual.level = 0;
236 range->avg_qual.noise = 40; 238 range->avg_qual.noise = 0;
237 range->avg_qual.updated = 7; 239 range->avg_qual.updated = IW_QUAL_ALL_UPDATED;
238 240
239 range->min_rts = BCM43xx_MIN_RTS_THRESHOLD; 241 range->min_rts = BCM43xx_MIN_RTS_THRESHOLD;
240 range->max_rts = BCM43xx_MAX_RTS_THRESHOLD; 242 range->max_rts = BCM43xx_MAX_RTS_THRESHOLD;
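The new max_qual.level of 146 relies on wireless-extensions treating the level byte as unsigned storage for a signed dBm reading: userspace reinterprets it as an 8-bit two's-complement value, so 146 wraps to 146 - 256 = -110 dBm. A userspace-style sketch of that convention (the convention is assumed here, not dictated by the header):

#include <stdio.h>
#include <stdint.h>

/* Reinterpret a wireless-extensions quality "level" byte as dBm,
 * assuming the driver stores signed dBm in the unsigned field. */
static int iw_level_to_dbm(uint8_t level)
{
	return (int8_t)level;		/* 146 -> -110, 0 stays 0 */
}

int main(void)
{
	printf("%d dBm\n", iw_level_to_dbm(146));	/* prints -110 dBm */
	return 0;
}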
@@ -254,7 +256,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
254 IW_ENC_CAPA_CIPHER_TKIP | 256 IW_ENC_CAPA_CIPHER_TKIP |
255 IW_ENC_CAPA_CIPHER_CCMP; 257 IW_ENC_CAPA_CIPHER_CCMP;
256 258
257 bcm43xx_lock_irqsafe(bcm, flags); 259 mutex_lock(&bcm->mutex);
258 phy = bcm43xx_current_phy(bcm); 260 phy = bcm43xx_current_phy(bcm);
259 261
260 range->num_bitrates = 0; 262 range->num_bitrates = 0;
@@ -301,7 +303,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
301 } 303 }
302 range->num_frequency = j; 304 range->num_frequency = j;
303 305
304 bcm43xx_unlock_irqsafe(bcm, flags); 306 mutex_unlock(&bcm->mutex);
305 307
306 return 0; 308 return 0;
307} 309}
@@ -314,11 +316,11 @@ static int bcm43xx_wx_set_nick(struct net_device *net_dev,
314 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 316 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
315 size_t len; 317 size_t len;
316 318
317 bcm43xx_lock_noirq(bcm); 319 mutex_lock(&bcm->mutex);
318 len = min((size_t)data->data.length, (size_t)IW_ESSID_MAX_SIZE); 320 len = min((size_t)data->data.length, (size_t)IW_ESSID_MAX_SIZE);
319 memcpy(bcm->nick, extra, len); 321 memcpy(bcm->nick, extra, len);
320 bcm->nick[len] = '\0'; 322 bcm->nick[len] = '\0';
321 bcm43xx_unlock_noirq(bcm); 323 mutex_unlock(&bcm->mutex);
322 324
323 return 0; 325 return 0;
324} 326}
@@ -331,12 +333,12 @@ static int bcm43xx_wx_get_nick(struct net_device *net_dev,
331 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 333 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
332 size_t len; 334 size_t len;
333 335
334 bcm43xx_lock_noirq(bcm); 336 mutex_lock(&bcm->mutex);
335 len = strlen(bcm->nick) + 1; 337 len = strlen(bcm->nick) + 1;
336 memcpy(extra, bcm->nick, len); 338 memcpy(extra, bcm->nick, len);
337 data->data.length = (__u16)len; 339 data->data.length = (__u16)len;
338 data->data.flags = 1; 340 data->data.flags = 1;
339 bcm43xx_unlock_noirq(bcm); 341 mutex_unlock(&bcm->mutex);
340 342
341 return 0; 343 return 0;
342} 344}
@@ -350,7 +352,8 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev,
350 unsigned long flags; 352 unsigned long flags;
351 int err = -EINVAL; 353 int err = -EINVAL;
352 354
353 bcm43xx_lock_irqsafe(bcm, flags); 355 mutex_lock(&bcm->mutex);
356 spin_lock_irqsave(&bcm->irq_lock, flags);
354 if (data->rts.disabled) { 357 if (data->rts.disabled) {
355 bcm->rts_threshold = BCM43xx_MAX_RTS_THRESHOLD; 358 bcm->rts_threshold = BCM43xx_MAX_RTS_THRESHOLD;
356 err = 0; 359 err = 0;
@@ -361,7 +364,8 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev,
361 err = 0; 364 err = 0;
362 } 365 }
363 } 366 }
364 bcm43xx_unlock_irqsafe(bcm, flags); 367 spin_unlock_irqrestore(&bcm->irq_lock, flags);
368 mutex_unlock(&bcm->mutex);
365 369
366 return err; 370 return err;
367} 371}
@@ -372,13 +376,12 @@ static int bcm43xx_wx_get_rts(struct net_device *net_dev,
372 char *extra) 376 char *extra)
373{ 377{
374 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 378 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
375 unsigned long flags;
376 379
377 bcm43xx_lock_irqsafe(bcm, flags); 380 mutex_lock(&bcm->mutex);
378 data->rts.value = bcm->rts_threshold; 381 data->rts.value = bcm->rts_threshold;
379 data->rts.fixed = 0; 382 data->rts.fixed = 0;
380 data->rts.disabled = (bcm->rts_threshold == BCM43xx_MAX_RTS_THRESHOLD); 383 data->rts.disabled = (bcm->rts_threshold == BCM43xx_MAX_RTS_THRESHOLD);
381 bcm43xx_unlock_irqsafe(bcm, flags); 384 mutex_unlock(&bcm->mutex);
382 385
383 return 0; 386 return 0;
384} 387}
@@ -392,7 +395,8 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev,
392 unsigned long flags; 395 unsigned long flags;
393 int err = -EINVAL; 396 int err = -EINVAL;
394 397
395 bcm43xx_lock_irqsafe(bcm, flags); 398 mutex_lock(&bcm->mutex);
399 spin_lock_irqsave(&bcm->irq_lock, flags);
396 if (data->frag.disabled) { 400 if (data->frag.disabled) {
397 bcm->ieee->fts = MAX_FRAG_THRESHOLD; 401 bcm->ieee->fts = MAX_FRAG_THRESHOLD;
398 err = 0; 402 err = 0;
@@ -403,7 +407,8 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev,
403 err = 0; 407 err = 0;
404 } 408 }
405 } 409 }
406 bcm43xx_unlock_irqsafe(bcm, flags); 410 spin_unlock_irqrestore(&bcm->irq_lock, flags);
411 mutex_unlock(&bcm->mutex);
407 412
408 return err; 413 return err;
409} 414}
@@ -414,13 +419,12 @@ static int bcm43xx_wx_get_frag(struct net_device *net_dev,
414 char *extra) 419 char *extra)
415{ 420{
416 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 421 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
417 unsigned long flags;
418 422
419 bcm43xx_lock_irqsafe(bcm, flags); 423 mutex_lock(&bcm->mutex);
420 data->frag.value = bcm->ieee->fts; 424 data->frag.value = bcm->ieee->fts;
421 data->frag.fixed = 0; 425 data->frag.fixed = 0;
422 data->frag.disabled = (bcm->ieee->fts == MAX_FRAG_THRESHOLD); 426 data->frag.disabled = (bcm->ieee->fts == MAX_FRAG_THRESHOLD);
423 bcm43xx_unlock_irqsafe(bcm, flags); 427 mutex_unlock(&bcm->mutex);
424 428
425 return 0; 429 return 0;
426} 430}
@@ -442,7 +446,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
442 return -EOPNOTSUPP; 446 return -EOPNOTSUPP;
443 } 447 }
444 448
445 bcm43xx_lock_irqsafe(bcm, flags); 449 mutex_lock(&bcm->mutex);
450 spin_lock_irqsave(&bcm->irq_lock, flags);
446 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) 451 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
447 goto out_unlock; 452 goto out_unlock;
448 radio = bcm43xx_current_radio(bcm); 453 radio = bcm43xx_current_radio(bcm);
@@ -466,7 +471,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
466 err = 0; 471 err = 0;
467 472
468out_unlock: 473out_unlock:
469 bcm43xx_unlock_irqsafe(bcm, flags); 474 spin_unlock_irqrestore(&bcm->irq_lock, flags);
475 mutex_unlock(&bcm->mutex);
470 476
471 return err; 477 return err;
472} 478}
@@ -478,10 +484,9 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
478{ 484{
479 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 485 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
480 struct bcm43xx_radioinfo *radio; 486 struct bcm43xx_radioinfo *radio;
481 unsigned long flags;
482 int err = -ENODEV; 487 int err = -ENODEV;
483 488
484 bcm43xx_lock_irqsafe(bcm, flags); 489 mutex_lock(&bcm->mutex);
485 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) 490 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
486 goto out_unlock; 491 goto out_unlock;
487 radio = bcm43xx_current_radio(bcm); 492 radio = bcm43xx_current_radio(bcm);
@@ -493,7 +498,7 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
493 498
494 err = 0; 499 err = 0;
495out_unlock: 500out_unlock:
496 bcm43xx_unlock_irqsafe(bcm, flags); 501 mutex_unlock(&bcm->mutex);
497 502
498 return err; 503 return err;
499} 504}
@@ -580,7 +585,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
580 return -EINVAL; 585 return -EINVAL;
581 } 586 }
582 587
583 bcm43xx_lock_irqsafe(bcm, flags); 588 mutex_lock(&bcm->mutex);
589 spin_lock_irqsave(&bcm->irq_lock, flags);
584 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { 590 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
585 err = bcm43xx_radio_set_interference_mitigation(bcm, mode); 591 err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
586 if (err) { 592 if (err) {
@@ -595,7 +601,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
595 } else 601 } else
596 bcm43xx_current_radio(bcm)->interfmode = mode; 602 bcm43xx_current_radio(bcm)->interfmode = mode;
597 } 603 }
598 bcm43xx_unlock_irqsafe(bcm, flags); 604 spin_unlock_irqrestore(&bcm->irq_lock, flags);
605 mutex_unlock(&bcm->mutex);
599 606
600 return err; 607 return err;
601} 608}
@@ -606,12 +613,11 @@ static int bcm43xx_wx_get_interfmode(struct net_device *net_dev,
606 char *extra) 613 char *extra)
607{ 614{
608 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 615 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
609 unsigned long flags;
610 int mode; 616 int mode;
611 617
612 bcm43xx_lock_irqsafe(bcm, flags); 618 mutex_lock(&bcm->mutex);
613 mode = bcm43xx_current_radio(bcm)->interfmode; 619 mode = bcm43xx_current_radio(bcm)->interfmode;
614 bcm43xx_unlock_irqsafe(bcm, flags); 620 mutex_unlock(&bcm->mutex);
615 621
616 switch (mode) { 622 switch (mode) {
617 case BCM43xx_RADIO_INTERFMODE_NONE: 623 case BCM43xx_RADIO_INTERFMODE_NONE:
@@ -641,9 +647,11 @@ static int bcm43xx_wx_set_shortpreamble(struct net_device *net_dev,
641 int on; 647 int on;
642 648
643 on = *((int *)extra); 649 on = *((int *)extra);
644 bcm43xx_lock_irqsafe(bcm, flags); 650 mutex_lock(&bcm->mutex);
651 spin_lock_irqsave(&bcm->irq_lock, flags);
645 bcm->short_preamble = !!on; 652 bcm->short_preamble = !!on;
646 bcm43xx_unlock_irqsafe(bcm, flags); 653 spin_unlock_irqrestore(&bcm->irq_lock, flags);
654 mutex_unlock(&bcm->mutex);
647 655
648 return 0; 656 return 0;
649} 657}
@@ -654,12 +662,11 @@ static int bcm43xx_wx_get_shortpreamble(struct net_device *net_dev,
654 char *extra) 662 char *extra)
655{ 663{
656 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 664 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
657 unsigned long flags;
658 int on; 665 int on;
659 666
660 bcm43xx_lock_irqsafe(bcm, flags); 667 mutex_lock(&bcm->mutex);
661 on = bcm->short_preamble; 668 on = bcm->short_preamble;
662 bcm43xx_unlock_irqsafe(bcm, flags); 669 mutex_unlock(&bcm->mutex);
663 670
664 if (on) 671 if (on)
665 strncpy(extra, "1 (Short Preamble enabled)", MAX_WX_STRING); 672 strncpy(extra, "1 (Short Preamble enabled)", MAX_WX_STRING);
@@ -681,11 +688,13 @@ static int bcm43xx_wx_set_swencryption(struct net_device *net_dev,
681 688
682 on = *((int *)extra); 689 on = *((int *)extra);
683 690
684 bcm43xx_lock_irqsafe(bcm, flags); 691 mutex_lock(&bcm->mutex);
692 spin_lock_irqsave(&bcm->irq_lock, flags);
685 bcm->ieee->host_encrypt = !!on; 693 bcm->ieee->host_encrypt = !!on;
686 bcm->ieee->host_decrypt = !!on; 694 bcm->ieee->host_decrypt = !!on;
687 bcm->ieee->host_build_iv = !on; 695 bcm->ieee->host_build_iv = !on;
688 bcm43xx_unlock_irqsafe(bcm, flags); 696 spin_unlock_irqrestore(&bcm->irq_lock, flags);
697 mutex_unlock(&bcm->mutex);
689 698
690 return 0; 699 return 0;
691} 700}
@@ -696,12 +705,11 @@ static int bcm43xx_wx_get_swencryption(struct net_device *net_dev,
696 char *extra) 705 char *extra)
697{ 706{
698 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 707 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
699 unsigned long flags;
700 int on; 708 int on;
701 709
702 bcm43xx_lock_irqsafe(bcm, flags); 710 mutex_lock(&bcm->mutex);
703 on = bcm->ieee->host_encrypt; 711 on = bcm->ieee->host_encrypt;
704 bcm43xx_unlock_irqsafe(bcm, flags); 712 mutex_unlock(&bcm->mutex);
705 713
706 if (on) 714 if (on)
707 strncpy(extra, "1 (SW encryption enabled) ", MAX_WX_STRING); 715 strncpy(extra, "1 (SW encryption enabled) ", MAX_WX_STRING);
@@ -764,11 +772,13 @@ static int bcm43xx_wx_sprom_read(struct net_device *net_dev,
764 if (!sprom) 772 if (!sprom)
765 goto out; 773 goto out;
766 774
767 bcm43xx_lock_irqsafe(bcm, flags); 775 mutex_lock(&bcm->mutex);
776 spin_lock_irqsave(&bcm->irq_lock, flags);
768 err = -ENODEV; 777 err = -ENODEV;
769 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) 778 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
770 err = bcm43xx_sprom_read(bcm, sprom); 779 err = bcm43xx_sprom_read(bcm, sprom);
771 bcm43xx_unlock_irqsafe(bcm, flags); 780 spin_unlock_irqrestore(&bcm->irq_lock, flags);
781 mutex_unlock(&bcm->mutex);
772 if (!err) 782 if (!err)
773 data->data.length = sprom2hex(sprom, extra); 783 data->data.length = sprom2hex(sprom, extra);
774 kfree(sprom); 784 kfree(sprom);
@@ -809,11 +819,15 @@ static int bcm43xx_wx_sprom_write(struct net_device *net_dev,
809 if (err) 819 if (err)
810 goto out_kfree; 820 goto out_kfree;
811 821
812 bcm43xx_lock_irqsafe(bcm, flags); 822 mutex_lock(&bcm->mutex);
823 spin_lock_irqsave(&bcm->irq_lock, flags);
824 spin_lock(&bcm->leds_lock);
813 err = -ENODEV; 825 err = -ENODEV;
814 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) 826 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
815 err = bcm43xx_sprom_write(bcm, sprom); 827 err = bcm43xx_sprom_write(bcm, sprom);
816 bcm43xx_unlock_irqsafe(bcm, flags); 828 spin_unlock(&bcm->leds_lock);
829 spin_unlock_irqrestore(&bcm->irq_lock, flags);
830 mutex_unlock(&bcm->mutex);
817out_kfree: 831out_kfree:
818 kfree(sprom); 832 kfree(sprom);
819out: 833out:
@@ -827,6 +841,10 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d
827 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 841 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
828 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); 842 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
829 struct iw_statistics *wstats; 843 struct iw_statistics *wstats;
844 struct ieee80211_network *network = NULL;
845 static int tmp_level = 0;
846 static int tmp_qual = 0;
847 unsigned long flags;
830 848
831 wstats = &bcm->stats.wstats; 849 wstats = &bcm->stats.wstats;
832 if (!mac->associated) { 850 if (!mac->associated) {
@@ -844,16 +862,28 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d
844 wstats->qual.level = 0; 862 wstats->qual.level = 0;
845 wstats->qual.noise = 0; 863 wstats->qual.noise = 0;
846 wstats->qual.updated = 7; 864 wstats->qual.updated = 7;
847 wstats->qual.updated |= IW_QUAL_NOISE_INVALID | 865 wstats->qual.updated |= IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
848 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
849 return wstats; 866 return wstats;
850 } 867 }
851 /* fill in the real statistics when iface associated */ 868 /* fill in the real statistics when iface associated */
852 wstats->qual.qual = 100; // TODO: get the real signal quality 869 spin_lock_irqsave(&mac->ieee->lock, flags);
853 wstats->qual.level = 3 - bcm->stats.link_quality; 870 list_for_each_entry(network, &mac->ieee->network_list, list) {
871 if (!memcmp(mac->associnfo.bssid, network->bssid, ETH_ALEN)) {
872 if (!tmp_level) { /* get initial values */
873 tmp_level = network->stats.signal;
874 tmp_qual = network->stats.rssi;
875 } else { /* smooth results */
876 tmp_level = (15 * tmp_level + network->stats.signal)/16;
877 tmp_qual = (15 * tmp_qual + network->stats.rssi)/16;
878 }
879 break;
880 }
881 }
882 spin_unlock_irqrestore(&mac->ieee->lock, flags);
883 wstats->qual.level = tmp_level;
884 wstats->qual.qual = 100 * tmp_qual / RX_RSSI_MAX;
854 wstats->qual.noise = bcm->stats.noise; 885 wstats->qual.noise = bcm->stats.noise;
855 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | 886 wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
856 IW_QUAL_NOISE_UPDATED;
857 wstats->discard.code = bcm->ieee->ieee_stats.rx_discards_undecryptable; 887 wstats->discard.code = bcm->ieee->ieee_stats.rx_discards_undecryptable;
858 wstats->discard.retries = bcm->ieee->ieee_stats.tx_retry_limit_exceeded; 888 wstats->discard.retries = bcm->ieee->ieee_stats.tx_retry_limit_exceeded;
859 wstats->discard.nwid = bcm->ieee->ieee_stats.tx_discards_wrong_sa; 889 wstats->discard.nwid = bcm->ieee->ieee_stats.tx_discards_wrong_sa;
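
The wireless-stats rework above pairs with the bcm43xx_xmit.c hunk below: the RX path now stores the raw hardware RSSI in stats.rssi and the postprocessed dBm value in stats.signal, and get_wireless_stats() smooths the per-network readings with a (15 * old + new) / 16 update, an exponential moving average with weight 1/16. The filter in isolation (note that tmp_level and tmp_qual are function-local statics above, so the smoothed state is shared by all bcm43xx devices in the system):

    /* EWMA with alpha = 1/16: each sample pulls the running average
     * one sixteenth of the way toward itself, damping per-packet jitter. */
    static int ewma16(int avg, int sample)
    {
            if (avg == 0)           /* first sample seeds the average */
                    return sample;
            return (15 * avg + sample) / 16;
    }
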
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
index 6dbd855b3647..c0efbfe605a5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
@@ -492,16 +492,15 @@ int bcm43xx_rx(struct bcm43xx_private *bcm,
492 492
493 memset(&stats, 0, sizeof(stats)); 493 memset(&stats, 0, sizeof(stats));
494 stats.mac_time = le16_to_cpu(rxhdr->mactime); 494 stats.mac_time = le16_to_cpu(rxhdr->mactime);
495 stats.rssi = bcm43xx_rssi_postprocess(bcm, rxhdr->rssi, is_ofdm, 495 stats.rssi = rxhdr->rssi;
496 stats.signal = bcm43xx_rssi_postprocess(bcm, rxhdr->rssi, is_ofdm,
496 !!(rxflags1 & BCM43xx_RXHDR_FLAGS1_2053RSSIADJ), 497 !!(rxflags1 & BCM43xx_RXHDR_FLAGS1_2053RSSIADJ),
497 !!(rxflags3 & BCM43xx_RXHDR_FLAGS3_2050RSSIADJ)); 498 !!(rxflags3 & BCM43xx_RXHDR_FLAGS3_2050RSSIADJ));
498 stats.signal = rxhdr->signal_quality; //FIXME
499//TODO stats.noise = 499//TODO stats.noise =
500 if (is_ofdm) 500 if (is_ofdm)
501 stats.rate = bcm43xx_plcp_get_bitrate_ofdm(plcp); 501 stats.rate = bcm43xx_plcp_get_bitrate_ofdm(plcp);
502 else 502 else
503 stats.rate = bcm43xx_plcp_get_bitrate_cck(plcp); 503 stats.rate = bcm43xx_plcp_get_bitrate_cck(plcp);
504//printk("RX ofdm %d, rate == %u\n", is_ofdm, stats.rate);
505 stats.received_channel = radio->channel; 504 stats.received_channel = radio->channel;
506//TODO stats.control = 505//TODO stats.control =
507 stats.mask = IEEE80211_STATMASK_SIGNAL | 506 stats.mask = IEEE80211_STATMASK_SIGNAL |
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 52e6df5c1a92..686d895116de 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -847,6 +847,7 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
847 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), 847 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
848 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), 848 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
849 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), 849 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
850 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x0002),
850 PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL", 851 PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL",
851 0x74c5e40d), 852 0x74c5e40d),
852 PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil", 853 PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index dafaa5ff5aa6..d500012fdc7a 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev)
1042 dev->name, local->fragm_threshold); 1042 dev->name, local->fragm_threshold);
1043 } 1043 }
1044 1044
1045 /* Some firmwares lose antenna selection settings on reset */
1046 (void) hostap_set_antsel(local);
1047
1045 return res; 1048 return res;
1046} 1049}
1047 1050
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index e955db435b30..d2db8eb412c1 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -6254,13 +6254,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6254 * member to call a function that then just turns and calls ipw2100_up. 6254 * member to call a function that then just turns and calls ipw2100_up.
6255 * net_dev->init is called after name allocation but before the 6255 * net_dev->init is called after name allocation but before the
6256 * notifier chain is called */ 6256 * notifier chain is called */
6257 mutex_lock(&priv->action_mutex);
6258 err = register_netdev(dev); 6257 err = register_netdev(dev);
6259 if (err) { 6258 if (err) {
6260 printk(KERN_WARNING DRV_NAME 6259 printk(KERN_WARNING DRV_NAME
6261 "Error calling register_netdev.\n"); 6260 "Error calling register_netdev.\n");
6262 goto fail_unlock; 6261 goto fail;
6263 } 6262 }
6263
6264 mutex_lock(&priv->action_mutex);
6264 registered = 1; 6265 registered = 1;
6265 6266
6266 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); 6267 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
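
The reordering above fixes a lock inversion: register_netdev() synchronously runs net_dev->init and the netdev notifier chain (as the preserved comment notes), and those paths can end up taking priv->action_mutex again, so the mutex must not already be held. The corrected shape, with the reasoning as comments:

    err = register_netdev(dev);
    if (err)
            goto fail;                       /* mutex not held, nothing to release */

    mutex_lock(&priv->action_mutex);         /* safe: registration has completed */
    registered = 1;
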
@@ -6531,7 +6532,7 @@ static int __init ipw2100_init(void)
6531 printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 6532 printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
6532 printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT); 6533 printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
6533 6534
6534 ret = pci_module_init(&ipw2100_pci_driver); 6535 ret = pci_register_driver(&ipw2100_pci_driver);
6535 6536
6536#ifdef CONFIG_IPW2100_DEBUG 6537#ifdef CONFIG_IPW2100_DEBUG
6537 ipw2100_debug_level = debug; 6538 ipw2100_debug_level = debug;
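
pci_module_init() was a compatibility wrapper around pci_register_driver() slated for removal, and both ipw drivers are moved off it in this series. A minimal sketch of the plain registration pair, with illustrative names:

    static struct pci_driver example_driver = {
            .name     = "example",
            .id_table = example_pci_ids,
            .probe    = example_probe,
            .remove   = example_remove,
    };

    static int __init example_init(void)
    {
            /* 0 on success, negative errno on failure */
            return pci_register_driver(&example_driver);
    }

    static void __exit example_exit(void)
    {
            pci_unregister_driver(&example_driver);
    }

    module_init(example_init);
    module_exit(example_exit);
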
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index b3300ffe4eec..f29ec0ebed2f 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -70,7 +70,7 @@
70#define VQ 70#define VQ
71#endif 71#endif
72 72
73#define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ 73#define IPW2200_VERSION "1.1.4" VK VD VM VP VR VQ
74#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver" 74#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
75#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 75#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
76#define DRV_VERSION IPW2200_VERSION 76#define DRV_VERSION IPW2200_VERSION
@@ -83,9 +83,7 @@ MODULE_AUTHOR(DRV_COPYRIGHT);
83MODULE_LICENSE("GPL"); 83MODULE_LICENSE("GPL");
84 84
85static int cmdlog = 0; 85static int cmdlog = 0;
86#ifdef CONFIG_IPW2200_DEBUG
87static int debug = 0; 86static int debug = 0;
88#endif
89static int channel = 0; 87static int channel = 0;
90static int mode = 0; 88static int mode = 0;
91 89
@@ -567,7 +565,6 @@ static inline void ipw_disable_interrupts(struct ipw_priv *priv)
567 spin_unlock_irqrestore(&priv->irq_lock, flags); 565 spin_unlock_irqrestore(&priv->irq_lock, flags);
568} 566}
569 567
570#ifdef CONFIG_IPW2200_DEBUG
571static char *ipw_error_desc(u32 val) 568static char *ipw_error_desc(u32 val)
572{ 569{
573 switch (val) { 570 switch (val) {
@@ -634,7 +631,6 @@ static void ipw_dump_error_log(struct ipw_priv *priv,
634 error->log[i].time, 631 error->log[i].time,
635 error->log[i].data, error->log[i].event); 632 error->log[i].data, error->log[i].event);
636} 633}
637#endif
638 634
639static inline int ipw_is_init(struct ipw_priv *priv) 635static inline int ipw_is_init(struct ipw_priv *priv)
640{ 636{
@@ -1435,9 +1431,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1435 const char *buf, size_t count) 1431 const char *buf, size_t count)
1436{ 1432{
1437 struct ipw_priv *priv = dev_get_drvdata(d); 1433 struct ipw_priv *priv = dev_get_drvdata(d);
1438#ifdef CONFIG_IPW2200_DEBUG
1439 struct net_device *dev = priv->net_dev; 1434 struct net_device *dev = priv->net_dev;
1440#endif
1441 char buffer[] = "00000000"; 1435 char buffer[] = "00000000";
1442 unsigned long len = 1436 unsigned long len =
1443 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1; 1437 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
@@ -1958,14 +1952,12 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1958 IPW_WARNING("Firmware error detected. Restarting.\n"); 1952 IPW_WARNING("Firmware error detected. Restarting.\n");
1959 if (priv->error) { 1953 if (priv->error) {
1960 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n"); 1954 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
1961#ifdef CONFIG_IPW2200_DEBUG
1962 if (ipw_debug_level & IPW_DL_FW_ERRORS) { 1955 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1963 struct ipw_fw_error *error = 1956 struct ipw_fw_error *error =
1964 ipw_alloc_error_log(priv); 1957 ipw_alloc_error_log(priv);
1965 ipw_dump_error_log(priv, error); 1958 ipw_dump_error_log(priv, error);
1966 kfree(error); 1959 kfree(error);
1967 } 1960 }
1968#endif
1969 } else { 1961 } else {
1970 priv->error = ipw_alloc_error_log(priv); 1962 priv->error = ipw_alloc_error_log(priv);
1971 if (priv->error) 1963 if (priv->error)
@@ -1973,10 +1965,8 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
1973 else 1965 else
1974 IPW_DEBUG_FW("Error allocating sysfs 'error' " 1966 IPW_DEBUG_FW("Error allocating sysfs 'error' "
1975 "log.\n"); 1967 "log.\n");
1976#ifdef CONFIG_IPW2200_DEBUG
1977 if (ipw_debug_level & IPW_DL_FW_ERRORS) 1968 if (ipw_debug_level & IPW_DL_FW_ERRORS)
1978 ipw_dump_error_log(priv, priv->error); 1969 ipw_dump_error_log(priv, priv->error);
1979#endif
1980 } 1970 }
1981 1971
1982 /* XXX: If hardware encryption is for WPA/WPA2, 1972 /* XXX: If hardware encryption is for WPA/WPA2,
@@ -2287,7 +2277,7 @@ static int ipw_send_scan_abort(struct ipw_priv *priv)
2287static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens) 2277static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2288{ 2278{
2289 struct ipw_sensitivity_calib calib = { 2279 struct ipw_sensitivity_calib calib = {
2290 .beacon_rssi_raw = sens, 2280 .beacon_rssi_raw = cpu_to_le16(sens),
2291 }; 2281 };
2292 2282
2293 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib), 2283 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
@@ -2353,6 +2343,7 @@ static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2353 return -1; 2343 return -1;
2354 } 2344 }
2355 2345
2346 phy_off = cpu_to_le32(phy_off);
2356 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off), 2347 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
2357 &phy_off); 2348 &phy_off);
2358} 2349}
@@ -2414,7 +2405,7 @@ static int ipw_set_tx_power(struct ipw_priv *priv)
2414static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts) 2405static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2415{ 2406{
2416 struct ipw_rts_threshold rts_threshold = { 2407 struct ipw_rts_threshold rts_threshold = {
2417 .rts_threshold = rts, 2408 .rts_threshold = cpu_to_le16(rts),
2418 }; 2409 };
2419 2410
2420 if (!priv) { 2411 if (!priv) {
@@ -2429,7 +2420,7 @@ static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2429static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag) 2420static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2430{ 2421{
2431 struct ipw_frag_threshold frag_threshold = { 2422 struct ipw_frag_threshold frag_threshold = {
2432 .frag_threshold = frag, 2423 .frag_threshold = cpu_to_le16(frag),
2433 }; 2424 };
2434 2425
2435 if (!priv) { 2426 if (!priv) {
@@ -2464,6 +2455,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2464 break; 2455 break;
2465 } 2456 }
2466 2457
2458 param = cpu_to_le32(mode);
2467 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param), 2459 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2468 &param); 2460 &param);
2469} 2461}
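
A recurring fix in this file: 16- and 32-bit fields of host commands (sensitivity, card-disable, RTS, fragmentation, power mode, and, further down, the whole QoS parameter table) are now passed through cpu_to_le16()/cpu_to_le32() before the buffer goes to the firmware, which expects little-endian payloads. The conversions compile to nothing on little-endian hosts, so the bugs they fix only show on big-endian machines. The pattern, with an invented command struct:

    struct example_cmd {
            __le16 threshold;       /* firmware consumes little-endian */
            __le32 flags;
    } __attribute__ ((packed));

    static void example_fill_cmd(struct example_cmd *cmd, u16 thr, u32 flags)
    {
            cmd->threshold = cpu_to_le16(thr);  /* byteswap on BE, no-op on LE */
            cmd->flags = cpu_to_le32(flags);
    }
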
@@ -2667,7 +2659,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
2667 2659
2668 IPW_DEBUG_FW(">> :\n"); 2660 IPW_DEBUG_FW(">> :\n");
2669 2661
2670 //set the Stop and Abort bit 2662 /* set the Stop and Abort bit */
2671 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT; 2663 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2672 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2664 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2673 priv->sram_desc.last_cb_index = 0; 2665 priv->sram_desc.last_cb_index = 0;
@@ -3002,8 +2994,6 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3002 if (rc < 0) 2994 if (rc < 0)
3003 return rc; 2995 return rc;
3004 2996
3005// spin_lock_irqsave(&priv->lock, flags);
3006
3007 for (addr = IPW_SHARED_LOWER_BOUND; 2997 for (addr = IPW_SHARED_LOWER_BOUND;
3008 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) { 2998 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3009 ipw_write32(priv, addr, 0); 2999 ipw_write32(priv, addr, 0);
@@ -3097,8 +3087,6 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3097 firmware have problem getting alive resp. */ 3087 firmware have problem getting alive resp. */
3098 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3088 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3099 3089
3100// spin_unlock_irqrestore(&priv->lock, flags);
3101
3102 return rc; 3090 return rc;
3103} 3091}
3104 3092
@@ -3919,7 +3907,6 @@ static const struct ipw_status_code ipw_status_codes[] = {
3919 {0x2E, "Cipher suite is rejected per security policy"}, 3907 {0x2E, "Cipher suite is rejected per security policy"},
3920}; 3908};
3921 3909
3922#ifdef CONFIG_IPW2200_DEBUG
3923static const char *ipw_get_status_code(u16 status) 3910static const char *ipw_get_status_code(u16 status)
3924{ 3911{
3925 int i; 3912 int i;
@@ -3928,7 +3915,6 @@ static const char *ipw_get_status_code(u16 status)
3928 return ipw_status_codes[i].reason; 3915 return ipw_status_codes[i].reason;
3929 return "Unknown status value."; 3916 return "Unknown status value.";
3930} 3917}
3931#endif
3932 3918
3933static void inline average_init(struct average *avg) 3919static void inline average_init(struct average *avg)
3934{ 3920{
@@ -4398,7 +4384,6 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4398 if (priv-> 4384 if (priv->
4399 status & (STATUS_ASSOCIATED | 4385 status & (STATUS_ASSOCIATED |
4400 STATUS_AUTH)) { 4386 STATUS_AUTH)) {
4401#ifdef CONFIG_IPW2200_DEBUG
4402 struct notif_authenticate *auth 4387 struct notif_authenticate *auth
4403 = &notif->u.auth; 4388 = &notif->u.auth;
4404 IPW_DEBUG(IPW_DL_NOTIF | 4389 IPW_DEBUG(IPW_DL_NOTIF |
@@ -4416,7 +4401,6 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4416 ipw_get_status_code 4401 ipw_get_status_code
4417 (ntohs 4402 (ntohs
4418 (auth->status))); 4403 (auth->status)));
4419#endif
4420 4404
4421 priv->status &= 4405 priv->status &=
4422 ~(STATUS_ASSOCIATING | 4406 ~(STATUS_ASSOCIATING |
@@ -5059,7 +5043,6 @@ static void ipw_rx_queue_replenish(void *data)
5059 } 5043 }
5060 list_del(element); 5044 list_del(element);
5061 5045
5062 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
5063 rxb->dma_addr = 5046 rxb->dma_addr =
5064 pci_map_single(priv->pci_dev, rxb->skb->data, 5047 pci_map_single(priv->pci_dev, rxb->skb->data,
5065 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5048 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
@@ -5838,8 +5821,8 @@ static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5838 key.station_index = 0; /* always 0 for BSS */ 5821 key.station_index = 0; /* always 0 for BSS */
5839 key.flags = 0; 5822 key.flags = 0;
5840 /* 0 for new key; previous value of counter (after fatal error) */ 5823 /* 0 for new key; previous value of counter (after fatal error) */
5841 key.tx_counter[0] = 0; 5824 key.tx_counter[0] = cpu_to_le32(0);
5842 key.tx_counter[1] = 0; 5825 key.tx_counter[1] = cpu_to_le32(0);
5843 5826
5844 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key); 5827 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5845} 5828}
@@ -5973,7 +5956,6 @@ static void ipw_bg_adhoc_check(void *data)
5973 mutex_unlock(&priv->mutex); 5956 mutex_unlock(&priv->mutex);
5974} 5957}
5975 5958
5976#ifdef CONFIG_IPW2200_DEBUG
5977static void ipw_debug_config(struct ipw_priv *priv) 5959static void ipw_debug_config(struct ipw_priv *priv)
5978{ 5960{
5979 IPW_DEBUG_INFO("Scan completed, no valid APs matched " 5961 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
@@ -5998,9 +5980,6 @@ static void ipw_debug_config(struct ipw_priv *priv)
5998 IPW_DEBUG_INFO("PRIVACY off\n"); 5980 IPW_DEBUG_INFO("PRIVACY off\n");
5999 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask); 5981 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6000} 5982}
6001#else
6002#define ipw_debug_config(x) do {} while (0)
6003#endif
6004 5983
6005static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode) 5984static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6006{ 5985{
@@ -6188,7 +6167,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
6188 } 6167 }
6189} 6168}
6190 6169
6191static int ipw_request_scan(struct ipw_priv *priv) 6170static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
6192{ 6171{
6193 struct ipw_scan_request_ext scan; 6172 struct ipw_scan_request_ext scan;
6194 int err = 0, scan_type; 6173 int err = 0, scan_type;
@@ -6219,19 +6198,29 @@ static int ipw_request_scan(struct ipw_priv *priv)
6219 } 6198 }
6220 6199
6221 memset(&scan, 0, sizeof(scan)); 6200 memset(&scan, 0, sizeof(scan));
6201 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6222 6202
6223 if (priv->config & CFG_SPEED_SCAN) 6203 if (type == IW_SCAN_TYPE_PASSIVE) {
6204 IPW_DEBUG_WX("use passive scanning\n");
6205 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6206 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6207 cpu_to_le16(120);
6208 ipw_add_scan_channels(priv, &scan, scan_type);
6209 goto send_request;
6210 }
6211
6212 /* Use active scan by default. */
6213 if (priv->config & CFG_SPEED_SCAN)
6224 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6214 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6225 cpu_to_le16(30); 6215 cpu_to_le16(30);
6226 else 6216 else
6227 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 6217 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6228 cpu_to_le16(20); 6218 cpu_to_le16(20);
6229 6219
6230 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 6220 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6231 cpu_to_le16(20); 6221 cpu_to_le16(20);
6232 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6233 6222
6234 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee)); 6223 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6235 6224
6236#ifdef CONFIG_IPW2200_MONITOR 6225#ifdef CONFIG_IPW2200_MONITOR
6237 if (priv->ieee->iw_mode == IW_MODE_MONITOR) { 6226 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
@@ -6268,7 +6257,7 @@ static int ipw_request_scan(struct ipw_priv *priv)
6268 * 6257 *
6269 * TODO: Move SPEED SCAN support to all modes and bands */ 6258 * TODO: Move SPEED SCAN support to all modes and bands */
6270 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 6259 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6271 cpu_to_le16(2000); 6260 cpu_to_le16(2000);
6272 } else { 6261 } else {
6273#endif /* CONFIG_IPW2200_MONITOR */ 6262#endif /* CONFIG_IPW2200_MONITOR */
6274 /* If we are roaming, then make this a directed scan for the 6263 /* If we are roaming, then make this a directed scan for the
@@ -6294,6 +6283,7 @@ static int ipw_request_scan(struct ipw_priv *priv)
6294 } 6283 }
6295#endif 6284#endif
6296 6285
6286send_request:
6297 err = ipw_send_scan_request_ext(priv, &scan); 6287 err = ipw_send_scan_request_ext(priv, &scan);
6298 if (err) { 6288 if (err) {
6299 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err); 6289 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
@@ -6304,11 +6294,19 @@ static int ipw_request_scan(struct ipw_priv *priv)
6304 priv->status &= ~STATUS_SCAN_PENDING; 6294 priv->status &= ~STATUS_SCAN_PENDING;
6305 queue_delayed_work(priv->workqueue, &priv->scan_check, 6295 queue_delayed_work(priv->workqueue, &priv->scan_check,
6306 IPW_SCAN_CHECK_WATCHDOG); 6296 IPW_SCAN_CHECK_WATCHDOG);
6307 done: 6297done:
6308 mutex_unlock(&priv->mutex); 6298 mutex_unlock(&priv->mutex);
6309 return err; 6299 return err;
6310} 6300}
6311 6301
6302static int ipw_request_passive_scan(struct ipw_priv *priv) {
6303 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
6304}
6305
6306static int ipw_request_scan(struct ipw_priv *priv) {
6307 return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
6308}
6309
6312static void ipw_bg_abort_scan(void *data) 6310static void ipw_bg_abort_scan(void *data)
6313{ 6311{
6314 struct ipw_priv *priv = data; 6312 struct ipw_priv *priv = data;
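
The passive-scan support is a parameterize-and-wrap refactor: the body of ipw_request_scan() moves into ipw_request_scan_helper(), the passive case programs a full-dwell scan and jumps straight to send_request, and two one-line wrappers keep entry points with the old shape for the work queue (wired up via INIT_WORK later in the patch). The idiom in miniature, names illustrative:

    /* One parameterized helper; thin wrappers preserve the signatures
     * that existing callers and work items expect. */
    static int scan_helper(struct ipw_priv *priv, int type);  /* shared body */

    static int request_active_scan(struct ipw_priv *priv)
    {
            return scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
    }

    static int request_passive_scan(struct ipw_priv *priv)
    {
            return scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
    }
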
@@ -6387,13 +6385,6 @@ static int ipw_wx_set_genie(struct net_device *dev,
6387 (wrqu->data.length && extra == NULL)) 6385 (wrqu->data.length && extra == NULL))
6388 return -EINVAL; 6386 return -EINVAL;
6389 6387
6390 //mutex_lock(&priv->mutex);
6391
6392 //if (!ieee->wpa_enabled) {
6393 // err = -EOPNOTSUPP;
6394 // goto out;
6395 //}
6396
6397 if (wrqu->data.length) { 6388 if (wrqu->data.length) {
6398 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 6389 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6399 if (buf == NULL) { 6390 if (buf == NULL) {
@@ -6413,7 +6404,6 @@ static int ipw_wx_set_genie(struct net_device *dev,
6413 6404
6414 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); 6405 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6415 out: 6406 out:
6416 //mutex_unlock(&priv->mutex);
6417 return err; 6407 return err;
6418} 6408}
6419 6409
@@ -6426,13 +6416,6 @@ static int ipw_wx_get_genie(struct net_device *dev,
6426 struct ieee80211_device *ieee = priv->ieee; 6416 struct ieee80211_device *ieee = priv->ieee;
6427 int err = 0; 6417 int err = 0;
6428 6418
6429 //mutex_lock(&priv->mutex);
6430
6431 //if (!ieee->wpa_enabled) {
6432 // err = -EOPNOTSUPP;
6433 // goto out;
6434 //}
6435
6436 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { 6419 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6437 wrqu->data.length = 0; 6420 wrqu->data.length = 0;
6438 goto out; 6421 goto out;
@@ -6447,7 +6430,6 @@ static int ipw_wx_get_genie(struct net_device *dev,
6447 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); 6430 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6448 6431
6449 out: 6432 out:
6450 //mutex_unlock(&priv->mutex);
6451 return err; 6433 return err;
6452} 6434}
6453 6435
@@ -6558,7 +6540,6 @@ static int ipw_wx_set_auth(struct net_device *dev,
6558 ieee->ieee802_1x = param->value; 6540 ieee->ieee802_1x = param->value;
6559 break; 6541 break;
6560 6542
6561 //case IW_AUTH_ROAMING_CONTROL:
6562 case IW_AUTH_PRIVACY_INVOKED: 6543 case IW_AUTH_PRIVACY_INVOKED:
6563 ieee->privacy_invoked = param->value; 6544 ieee->privacy_invoked = param->value;
6564 break; 6545 break;
@@ -6680,7 +6661,7 @@ static int ipw_wx_set_mlme(struct net_device *dev,
6680 6661
6681 switch (mlme->cmd) { 6662 switch (mlme->cmd) {
6682 case IW_MLME_DEAUTH: 6663 case IW_MLME_DEAUTH:
6683 // silently ignore 6664 /* silently ignore */
6684 break; 6665 break;
6685 6666
6686 case IW_MLME_DISASSOC: 6667 case IW_MLME_DISASSOC:
@@ -6811,7 +6792,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
6811 burst_duration = ipw_qos_get_burst_duration(priv); 6792 burst_duration = ipw_qos_get_burst_duration(priv);
6812 for (i = 0; i < QOS_QUEUE_NUM; i++) 6793 for (i = 0; i < QOS_QUEUE_NUM; i++)
6813 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] = 6794 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6814 (u16) burst_duration; 6795 (u16)burst_duration;
6815 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 6796 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6816 if (type == IEEE_B) { 6797 if (type == IEEE_B) {
6817 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n", 6798
@@ -6843,11 +6824,20 @@ static int ipw_qos_activate(struct ipw_priv *priv,
6843 burst_duration = ipw_qos_get_burst_duration(priv); 6824 burst_duration = ipw_qos_get_burst_duration(priv);
6844 for (i = 0; i < QOS_QUEUE_NUM; i++) 6825 for (i = 0; i < QOS_QUEUE_NUM; i++)
6845 qos_parameters[QOS_PARAM_SET_ACTIVE]. 6826 qos_parameters[QOS_PARAM_SET_ACTIVE].
6846 tx_op_limit[i] = (u16) burst_duration; 6827 tx_op_limit[i] = (u16)burst_duration;
6847 } 6828 }
6848 } 6829 }
6849 6830
6850 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n"); 6831 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6832 for (i = 0; i < 3; i++) {
6833 int j;
6834 for (j = 0; j < QOS_QUEUE_NUM; j++) {
6835 qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
6836 qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
6837 qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
6838 }
6839 }
6840
6851 err = ipw_send_qos_params_command(priv, 6841 err = ipw_send_qos_params_command(priv,
6852 (struct ieee80211_qos_parameters *) 6842 (struct ieee80211_qos_parameters *)
6853 &(qos_parameters[0])); 6843 &(qos_parameters[0]));
@@ -7086,7 +7076,7 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7086 7076
7087 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) { 7077 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7088 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD; 7078 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7089 tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK; 7079 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7090 } 7080 }
7091 return 0; 7081 return 0;
7092} 7082}
@@ -7667,7 +7657,6 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7667 /* Big bitfield of all the fields we provide in radiotap */ 7657 /* Big bitfield of all the fields we provide in radiotap */
7668 ipw_rt->rt_hdr.it_present = 7658 ipw_rt->rt_hdr.it_present =
7669 ((1 << IEEE80211_RADIOTAP_FLAGS) | 7659 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7670 (1 << IEEE80211_RADIOTAP_TSFT) |
7671 (1 << IEEE80211_RADIOTAP_RATE) | 7660 (1 << IEEE80211_RADIOTAP_RATE) |
7672 (1 << IEEE80211_RADIOTAP_CHANNEL) | 7661 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7673 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 7662 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@@ -7676,6 +7665,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7676 7665
7677 /* Zero the flags, we'll add to them as we go */ 7666 /* Zero the flags, we'll add to them as we go */
7678 ipw_rt->rt_flags = 0; 7667 ipw_rt->rt_flags = 0;
7668 ipw_rt->rt_tsf = 0ULL;
7679 7669
7680 /* Convert signal to DBM */ 7670 /* Convert signal to DBM */
7681 ipw_rt->rt_dbmsignal = antsignal; 7671 ipw_rt->rt_dbmsignal = antsignal;
@@ -7794,7 +7784,6 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7794 s8 noise = frame->noise; 7784 s8 noise = frame->noise;
7795 u8 rate = frame->rate; 7785 u8 rate = frame->rate;
7796 short len = le16_to_cpu(pkt->u.frame.length); 7786 short len = le16_to_cpu(pkt->u.frame.length);
7797 u64 tsf = 0;
7798 struct sk_buff *skb; 7787 struct sk_buff *skb;
7799 int hdr_only = 0; 7788 int hdr_only = 0;
7800 u16 filter = priv->prom_priv->filter; 7789 u16 filter = priv->prom_priv->filter;
@@ -7829,17 +7818,17 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7829 } 7818 }
7830 7819
7831 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE; 7820 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7832 if (ieee80211_is_management(hdr->frame_ctl)) { 7821 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
7833 if (filter & IPW_PROM_NO_MGMT) 7822 if (filter & IPW_PROM_NO_MGMT)
7834 return; 7823 return;
7835 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 7824 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7836 hdr_only = 1; 7825 hdr_only = 1;
7837 } else if (ieee80211_is_control(hdr->frame_ctl)) { 7826 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
7838 if (filter & IPW_PROM_NO_CTL) 7827 if (filter & IPW_PROM_NO_CTL)
7839 return; 7828 return;
7840 if (filter & IPW_PROM_CTL_HEADER_ONLY) 7829 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7841 hdr_only = 1; 7830 hdr_only = 1;
7842 } else if (ieee80211_is_data(hdr->frame_ctl)) { 7831 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
7843 if (filter & IPW_PROM_NO_DATA) 7832 if (filter & IPW_PROM_NO_DATA)
7844 return; 7833 return;
7845 if (filter & IPW_PROM_DATA_HEADER_ONLY) 7834 if (filter & IPW_PROM_DATA_HEADER_ONLY)
@@ -7857,7 +7846,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7857 ipw_rt = (void *)skb->data; 7846 ipw_rt = (void *)skb->data;
7858 7847
7859 if (hdr_only) 7848 if (hdr_only)
7860 len = ieee80211_get_hdrlen(hdr->frame_ctl); 7849 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
7861 7850
7862 memcpy(ipw_rt->payload, hdr, len); 7851 memcpy(ipw_rt->payload, hdr, len);
7863 7852
@@ -7880,7 +7869,6 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7880 /* Big bitfield of all the fields we provide in radiotap */ 7869 /* Big bitfield of all the fields we provide in radiotap */
7881 ipw_rt->rt_hdr.it_present = 7870 ipw_rt->rt_hdr.it_present =
7882 ((1 << IEEE80211_RADIOTAP_FLAGS) | 7871 ((1 << IEEE80211_RADIOTAP_FLAGS) |
7883 (1 << IEEE80211_RADIOTAP_TSFT) |
7884 (1 << IEEE80211_RADIOTAP_RATE) | 7872 (1 << IEEE80211_RADIOTAP_RATE) |
7885 (1 << IEEE80211_RADIOTAP_CHANNEL) | 7873 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7886 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | 7874 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@@ -7889,8 +7877,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7889 7877
7890 /* Zero the flags, we'll add to them as we go */ 7878 /* Zero the flags, we'll add to them as we go */
7891 ipw_rt->rt_flags = 0; 7879 ipw_rt->rt_flags = 0;
7892 7880 ipw_rt->rt_tsf = 0ULL;
7893 ipw_rt->rt_tsf = tsf;
7894 7881
7895 /* Convert to DBM */ 7882 /* Convert to DBM */
7896 ipw_rt->rt_dbmsignal = signal; 7883 ipw_rt->rt_dbmsignal = signal;
@@ -8163,8 +8150,7 @@ static void ipw_rx(struct ipw_priv *priv)
8163 switch (pkt->header.message_type) { 8150 switch (pkt->header.message_type) {
8164 case RX_FRAME_TYPE: /* 802.11 frame */ { 8151 case RX_FRAME_TYPE: /* 802.11 frame */ {
8165 struct ieee80211_rx_stats stats = { 8152 struct ieee80211_rx_stats stats = {
8166 .rssi = 8153 .rssi = pkt->u.frame.rssi_dbm -
8167 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8168 IPW_RSSI_TO_DBM, 8154 IPW_RSSI_TO_DBM,
8169 .signal = 8155 .signal =
8170 le16_to_cpu(pkt->u.frame.rssi_dbm) - 8156 le16_to_cpu(pkt->u.frame.rssi_dbm) -
@@ -8599,9 +8585,26 @@ static int ipw_wx_get_freq(struct net_device *dev,
8599 * configured CHANNEL then return that; otherwise return ANY */ 8585 * configured CHANNEL then return that; otherwise return ANY */
8600 mutex_lock(&priv->mutex); 8586 mutex_lock(&priv->mutex);
8601 if (priv->config & CFG_STATIC_CHANNEL || 8587 if (priv->config & CFG_STATIC_CHANNEL ||
8602 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) 8588 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8603 wrqu->freq.m = priv->channel; 8589 int i;
8604 else 8590
8591 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8592 BUG_ON(i == -1);
8593 wrqu->freq.e = 1;
8594
8595 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8596 case IEEE80211_52GHZ_BAND:
8597 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8598 break;
8599
8600 case IEEE80211_24GHZ_BAND:
8601 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8602 break;
8603
8604 default:
8605 BUG();
8606 }
8607 } else
8605 wrqu->freq.m = 0; 8608 wrqu->freq.m = 0;
8606 8609
8607 mutex_unlock(&priv->mutex); 8610 mutex_unlock(&priv->mutex);
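
SIOCGIWFREQ now reports an actual frequency rather than a bare channel index. Wireless extensions encode a frequency as m * 10^e, so a geo-table entry in MHz scaled by 100000 with e = 1 comes out in Hz: channel 1 at 2412 MHz becomes m = 241200000, e = 1, i.e. 2.412 GHz. The encoding as a helper (a sketch; the driver above open-codes it per band):

    /* struct iw_freq represents a frequency as m * 10^e */
    static void set_iw_freq_mhz(struct iw_freq *freq, u32 mhz)
    {
            freq->m = mhz * 100000; /* MHz * 100000 * 10^1 = Hz */
            freq->e = 1;
    }
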
@@ -8857,42 +8860,38 @@ static int ipw_wx_set_essid(struct net_device *dev,
8857 union iwreq_data *wrqu, char *extra) 8860 union iwreq_data *wrqu, char *extra)
8858{ 8861{
8859 struct ipw_priv *priv = ieee80211_priv(dev); 8862 struct ipw_priv *priv = ieee80211_priv(dev);
8860 char *essid = ""; /* ANY */ 8863 int length;
8861 int length = 0;
8862 mutex_lock(&priv->mutex);
8863 if (wrqu->essid.flags && wrqu->essid.length) {
8864 length = wrqu->essid.length - 1;
8865 essid = extra;
8866 }
8867 if (length == 0) {
8868 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8869 if ((priv->config & CFG_STATIC_ESSID) &&
8870 !(priv->status & (STATUS_ASSOCIATED |
8871 STATUS_ASSOCIATING))) {
8872 IPW_DEBUG_ASSOC("Attempting to associate with new "
8873 "parameters.\n");
8874 priv->config &= ~CFG_STATIC_ESSID;
8875 ipw_associate(priv);
8876 }
8877 mutex_unlock(&priv->mutex);
8878 return 0;
8879 }
8880 8864
8881 length = min(length, IW_ESSID_MAX_SIZE); 8865 mutex_lock(&priv->mutex);
8866
8867 if (!wrqu->essid.flags)
8868 {
8869 IPW_DEBUG_WX("Setting ESSID to ANY\n");
8870 ipw_disassociate(priv);
8871 priv->config &= ~CFG_STATIC_ESSID;
8872 ipw_associate(priv);
8873 mutex_unlock(&priv->mutex);
8874 return 0;
8875 }
8876
8877 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
8878 if (!extra[length - 1])
8879 length--;
8882 8880
8883 priv->config |= CFG_STATIC_ESSID; 8881 priv->config |= CFG_STATIC_ESSID;
8884 8882
8885 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { 8883 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
8884 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
8886 IPW_DEBUG_WX("ESSID set to current ESSID.\n"); 8885 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
8887 mutex_unlock(&priv->mutex); 8886 mutex_unlock(&priv->mutex);
8888 return 0; 8887 return 0;
8889 } 8888 }
8890 8889
8891 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length), 8890 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
8892 length); 8891 length);
8893 8892
8894 priv->essid_len = length; 8893 priv->essid_len = length;
8895 memcpy(priv->essid, essid, priv->essid_len); 8894 memcpy(priv->essid, extra, priv->essid_len);
8896 8895
8897 /* Network configuration changed -- force [re]association */ 8896 /* Network configuration changed -- force [re]association */
8898 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n"); 8897 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
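
The rewritten ESSID handler changes two behaviors: clearing the ESSID (no flags set) now actively disassociates before re-enabling automatic association, and a trailing NUL is trimmed from the length, since older wireless tools pass the ESSID length including the terminator, which made "linux" and "linux\0" look like different networks. The normalization, sketched with an explicit empty-length guard added:

    len = min_t(int, wrqu->essid.length, IW_ESSID_MAX_SIZE);
    if (len > 0 && !extra[len - 1])     /* drop a trailing '\0' */
            len--;
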
@@ -9273,7 +9272,7 @@ static int ipw_wx_set_retry(struct net_device *dev,
9273 if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) 9272 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9274 return 0; 9273 return 0;
9275 9274
9276 if (wrqu->retry.value < 0 || wrqu->retry.value > 255) 9275 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9277 return -EINVAL; 9276 return -EINVAL;
9278 9277
9279 mutex_lock(&priv->mutex); 9278 mutex_lock(&priv->mutex);
@@ -9396,15 +9395,19 @@ static int ipw_wx_set_scan(struct net_device *dev,
9396 union iwreq_data *wrqu, char *extra) 9395 union iwreq_data *wrqu, char *extra)
9397{ 9396{
9398 struct ipw_priv *priv = ieee80211_priv(dev); 9397 struct ipw_priv *priv = ieee80211_priv(dev);
9399 struct iw_scan_req *req = NULL; 9398 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9400 if (wrqu->data.length 9399
9401 && wrqu->data.length == sizeof(struct iw_scan_req)) { 9400 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9402 req = (struct iw_scan_req *)extra;
9403 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) { 9401 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9404 ipw_request_direct_scan(priv, req->essid, 9402 ipw_request_direct_scan(priv, req->essid,
9405 req->essid_len); 9403 req->essid_len);
9406 return 0; 9404 return 0;
9407 } 9405 }
9406 if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9407 queue_work(priv->workqueue,
9408 &priv->request_passive_scan);
9409 return 0;
9410 }
9408 } 9411 }
9409 9412
9410 IPW_DEBUG_WX("Start scan\n"); 9413 IPW_DEBUG_WX("Start scan\n");
@@ -9766,7 +9769,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
9766 return 0; 9769 return 0;
9767} 9770}
9768 9771
9769#endif // CONFIG_IPW2200_MONITOR 9772#endif /* CONFIG_IPW2200_MONITOR */
9770 9773
9771static int ipw_wx_reset(struct net_device *dev, 9774static int ipw_wx_reset(struct net_device *dev,
9772 struct iw_request_info *info, 9775 struct iw_request_info *info,
@@ -10009,7 +10012,7 @@ static void init_sys_config(struct ipw_sys_config *sys_config)
10009 sys_config->dot11g_auto_detection = 0; 10012 sys_config->dot11g_auto_detection = 0;
10010 sys_config->enable_cts_to_self = 0; 10013 sys_config->enable_cts_to_self = 0;
10011 sys_config->bt_coexist_collision_thr = 0; 10014 sys_config->bt_coexist_collision_thr = 0;
10012 sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256 10015 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10013 sys_config->silence_threshold = 0x1e; 10016 sys_config->silence_threshold = 0x1e;
10014} 10017}
10015 10018
@@ -10113,7 +10116,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10113 switch (priv->ieee->sec.level) { 10116 switch (priv->ieee->sec.level) {
10114 case SEC_LEVEL_3: 10117 case SEC_LEVEL_3:
10115 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10118 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10116 IEEE80211_FCTL_PROTECTED; 10119 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10117 /* XXX: ACK flag must be set for CCMP even if it 10120 /* XXX: ACK flag must be set for CCMP even if it
10118 * is a multicast/broadcast packet, because CCMP 10121 * is a multicast/broadcast packet, because CCMP
10119 * group communication encrypted by GTK is 10122 * group communication encrypted by GTK is
@@ -10128,14 +10131,14 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10128 break; 10131 break;
10129 case SEC_LEVEL_2: 10132 case SEC_LEVEL_2:
10130 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10133 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10131 IEEE80211_FCTL_PROTECTED; 10134 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10132 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP; 10135 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10133 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP; 10136 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10134 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE; 10137 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10135 break; 10138 break;
10136 case SEC_LEVEL_1: 10139 case SEC_LEVEL_1:
10137 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |= 10140 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10138 IEEE80211_FCTL_PROTECTED; 10141 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10139 tfd->u.data.key_index = priv->ieee->tx_keyidx; 10142 tfd->u.data.key_index = priv->ieee->tx_keyidx;
10140 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <= 10143 if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
10141 40) 10144 40)
@@ -10267,17 +10270,17 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10267 10270
10268 /* Filtering of fragment chains is done against the first fragment */ 10271
10269 hdr = (void *)txb->fragments[0]->data; 10272 hdr = (void *)txb->fragments[0]->data;
10270 if (ieee80211_is_management(hdr->frame_ctl)) { 10273 if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
10271 if (filter & IPW_PROM_NO_MGMT) 10274 if (filter & IPW_PROM_NO_MGMT)
10272 return; 10275 return;
10273 if (filter & IPW_PROM_MGMT_HEADER_ONLY) 10276 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10274 hdr_only = 1; 10277 hdr_only = 1;
10275 } else if (ieee80211_is_control(hdr->frame_ctl)) { 10278 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
10276 if (filter & IPW_PROM_NO_CTL) 10279 if (filter & IPW_PROM_NO_CTL)
10277 return; 10280 return;
10278 if (filter & IPW_PROM_CTL_HEADER_ONLY) 10281 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10279 hdr_only = 1; 10282 hdr_only = 1;
10280 } else if (ieee80211_is_data(hdr->frame_ctl)) { 10283 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
10281 if (filter & IPW_PROM_NO_DATA) 10284 if (filter & IPW_PROM_NO_DATA)
10282 return; 10285 return;
10283 if (filter & IPW_PROM_DATA_HEADER_ONLY) 10286 if (filter & IPW_PROM_DATA_HEADER_ONLY)
@@ -10292,7 +10295,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10292 10295
10293 if (hdr_only) { 10296 if (hdr_only) {
10294 hdr = (void *)src->data; 10297 hdr = (void *)src->data;
10295 len = ieee80211_get_hdrlen(hdr->frame_ctl); 10298 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10296 } else 10299 } else
10297 len = src->len; 10300 len = src->len;
10298 10301
@@ -10636,6 +10639,8 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
10636 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv); 10639 INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
10637 INIT_WORK(&priv->request_scan, 10640 INIT_WORK(&priv->request_scan,
10638 (void (*)(void *))ipw_request_scan, priv); 10641 (void (*)(void *))ipw_request_scan, priv);
10642 INIT_WORK(&priv->request_passive_scan,
10643 (void (*)(void *))ipw_request_passive_scan, priv);
10639 INIT_WORK(&priv->gather_stats, 10644 INIT_WORK(&priv->gather_stats,
10640 (void (*)(void *))ipw_bg_gather_stats, priv); 10645 (void (*)(void *))ipw_bg_gather_stats, priv);
10641 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv); 10646 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
@@ -11488,9 +11493,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
11488 11493
11489 priv->net_dev = net_dev; 11494 priv->net_dev = net_dev;
11490 priv->pci_dev = pdev; 11495 priv->pci_dev = pdev;
11491#ifdef CONFIG_IPW2200_DEBUG
11492 ipw_debug_level = debug; 11496 ipw_debug_level = debug;
11493#endif
11494 spin_lock_init(&priv->irq_lock); 11497 spin_lock_init(&priv->irq_lock);
11495 spin_lock_init(&priv->lock); 11498 spin_lock_init(&priv->lock);
11496 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) 11499 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
@@ -11755,6 +11758,16 @@ static int ipw_pci_resume(struct pci_dev *pdev)
11755} 11758}
11756#endif 11759#endif
11757 11760
11761static void ipw_pci_shutdown(struct pci_dev *pdev)
11762{
11763 struct ipw_priv *priv = pci_get_drvdata(pdev);
11764
11765 /* Take down the device; powers it off, etc. */
11766 ipw_down(priv);
11767
11768 pci_disable_device(pdev);
11769}
11770
11758/* driver initialization stuff */ 11771/* driver initialization stuff */
11759static struct pci_driver ipw_driver = { 11772static struct pci_driver ipw_driver = {
11760 .name = DRV_NAME, 11773 .name = DRV_NAME,
@@ -11765,6 +11778,7 @@ static struct pci_driver ipw_driver = {
11765 .suspend = ipw_pci_suspend, 11778 .suspend = ipw_pci_suspend,
11766 .resume = ipw_pci_resume, 11779 .resume = ipw_pci_resume,
11767#endif 11780#endif
11781 .shutdown = ipw_pci_shutdown,
11768}; 11782};
11769 11783
11770static int __init ipw_init(void) 11784static int __init ipw_init(void)
@@ -11774,7 +11788,7 @@ static int __init ipw_init(void)
11774 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); 11788 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11775 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); 11789 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11776 11790
11777 ret = pci_module_init(&ipw_driver); 11791 ret = pci_register_driver(&ipw_driver);
11778 if (ret) { 11792 if (ret) {
11779 IPW_ERROR("Unable to initialize PCI module\n"); 11793 IPW_ERROR("Unable to initialize PCI module\n");
11780 return ret; 11794 return ret;
@@ -11808,10 +11822,8 @@ MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11808module_param(led, int, 0444); 11822module_param(led, int, 0444);
11809MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n"); 11823MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
11810 11824
11811#ifdef CONFIG_IPW2200_DEBUG
11812module_param(debug, int, 0444); 11825module_param(debug, int, 0444);
11813MODULE_PARM_DESC(debug, "debug output mask"); 11826MODULE_PARM_DESC(debug, "debug output mask");
11814#endif
11815 11827
11816module_param(channel, int, 0444); 11828module_param(channel, int, 0444);
11817MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])"); 11829MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index 8b1cd7c749a4..dad5eedefbf1 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -713,7 +713,6 @@ struct ipw_rx_packet {
713 713
714struct ipw_rx_mem_buffer { 714struct ipw_rx_mem_buffer {
715 dma_addr_t dma_addr; 715 dma_addr_t dma_addr;
716 struct ipw_rx_buffer *rxb;
717 struct sk_buff *skb; 716 struct sk_buff *skb;
718 struct list_head list; 717 struct list_head list;
719}; /* Not transferred over network, so not __attribute__ ((packed)) */ 718}; /* Not transferred over network, so not __attribute__ ((packed)) */
@@ -1297,6 +1296,7 @@ struct ipw_priv {
1297 struct work_struct system_config; 1296 struct work_struct system_config;
1298 struct work_struct rx_replenish; 1297 struct work_struct rx_replenish;
1299 struct work_struct request_scan; 1298 struct work_struct request_scan;
1299 struct work_struct request_passive_scan;
1300 struct work_struct adapter_restart; 1300 struct work_struct adapter_restart;
1301 struct work_struct rf_kill; 1301 struct work_struct rf_kill;
1302 struct work_struct up; 1302 struct work_struct up;
@@ -1381,13 +1381,18 @@ BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\
1381BIT_ARG16(x) 1381BIT_ARG16(x)
1382 1382
1383 1383
1384#ifdef CONFIG_IPW2200_DEBUG
1385#define IPW_DEBUG(level, fmt, args...) \ 1384#define IPW_DEBUG(level, fmt, args...) \
1386do { if (ipw_debug_level & (level)) \ 1385do { if (ipw_debug_level & (level)) \
1387 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \ 1386 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1388 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0) 1387 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
1388
1389#ifdef CONFIG_IPW2200_DEBUG
1390#define IPW_LL_DEBUG(level, fmt, args...) \
1391do { if (ipw_debug_level & (level)) \
1392 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1393 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
1389#else 1394#else
1390#define IPW_DEBUG(level, fmt, args...) do {} while (0) 1395#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
1391#endif /* CONFIG_IPW2200_DEBUG */ 1396#endif /* CONFIG_IPW2200_DEBUG */
1392 1397
1393/* 1398/*
@@ -1457,28 +1462,27 @@ do { if (ipw_debug_level & (level)) \
1457 1462
1458#define IPW_DEBUG_WX(f, a...) IPW_DEBUG(IPW_DL_WX, f, ## a) 1463#define IPW_DEBUG_WX(f, a...) IPW_DEBUG(IPW_DL_WX, f, ## a)
1459#define IPW_DEBUG_SCAN(f, a...) IPW_DEBUG(IPW_DL_SCAN, f, ## a) 1464#define IPW_DEBUG_SCAN(f, a...) IPW_DEBUG(IPW_DL_SCAN, f, ## a)
1460#define IPW_DEBUG_STATUS(f, a...) IPW_DEBUG(IPW_DL_STATUS, f, ## a) 1465#define IPW_DEBUG_TRACE(f, a...) IPW_LL_DEBUG(IPW_DL_TRACE, f, ## a)
1461#define IPW_DEBUG_TRACE(f, a...) IPW_DEBUG(IPW_DL_TRACE, f, ## a) 1466#define IPW_DEBUG_RX(f, a...) IPW_LL_DEBUG(IPW_DL_RX, f, ## a)
1462#define IPW_DEBUG_RX(f, a...) IPW_DEBUG(IPW_DL_RX, f, ## a) 1467#define IPW_DEBUG_TX(f, a...) IPW_LL_DEBUG(IPW_DL_TX, f, ## a)
1463#define IPW_DEBUG_TX(f, a...) IPW_DEBUG(IPW_DL_TX, f, ## a) 1468#define IPW_DEBUG_ISR(f, a...) IPW_LL_DEBUG(IPW_DL_ISR, f, ## a)
1464#define IPW_DEBUG_ISR(f, a...) IPW_DEBUG(IPW_DL_ISR, f, ## a)
1465#define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a) 1469#define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a)
1466#define IPW_DEBUG_LED(f, a...) IPW_DEBUG(IPW_DL_LED, f, ## a) 1470#define IPW_DEBUG_LED(f, a...) IPW_LL_DEBUG(IPW_DL_LED, f, ## a)
1467#define IPW_DEBUG_WEP(f, a...) IPW_DEBUG(IPW_DL_WEP, f, ## a) 1471#define IPW_DEBUG_WEP(f, a...) IPW_LL_DEBUG(IPW_DL_WEP, f, ## a)
1468#define IPW_DEBUG_HC(f, a...) IPW_DEBUG(IPW_DL_HOST_COMMAND, f, ## a) 1472#define IPW_DEBUG_HC(f, a...) IPW_LL_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
1469#define IPW_DEBUG_FRAG(f, a...) IPW_DEBUG(IPW_DL_FRAG, f, ## a) 1473#define IPW_DEBUG_FRAG(f, a...) IPW_LL_DEBUG(IPW_DL_FRAG, f, ## a)
1470#define IPW_DEBUG_FW(f, a...) IPW_DEBUG(IPW_DL_FW, f, ## a) 1474#define IPW_DEBUG_FW(f, a...) IPW_LL_DEBUG(IPW_DL_FW, f, ## a)
1471#define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a) 1475#define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a)
1472#define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a) 1476#define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a)
1473#define IPW_DEBUG_IO(f, a...) IPW_DEBUG(IPW_DL_IO, f, ## a) 1477#define IPW_DEBUG_IO(f, a...) IPW_LL_DEBUG(IPW_DL_IO, f, ## a)
1474#define IPW_DEBUG_ORD(f, a...) IPW_DEBUG(IPW_DL_ORD, f, ## a) 1478#define IPW_DEBUG_ORD(f, a...) IPW_LL_DEBUG(IPW_DL_ORD, f, ## a)
1475#define IPW_DEBUG_FW_INFO(f, a...) IPW_DEBUG(IPW_DL_FW_INFO, f, ## a) 1479#define IPW_DEBUG_FW_INFO(f, a...) IPW_LL_DEBUG(IPW_DL_FW_INFO, f, ## a)
1476#define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a) 1480#define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a)
1477#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) 1481#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
1478#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a) 1482#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
1479#define IPW_DEBUG_STATS(f, a...) IPW_DEBUG(IPW_DL_STATS, f, ## a) 1483#define IPW_DEBUG_STATS(f, a...) IPW_LL_DEBUG(IPW_DL_STATS, f, ## a)
1480#define IPW_DEBUG_MERGE(f, a...) IPW_DEBUG(IPW_DL_MERGE, f, ## a) 1484#define IPW_DEBUG_MERGE(f, a...) IPW_LL_DEBUG(IPW_DL_MERGE, f, ## a)
1481#define IPW_DEBUG_QOS(f, a...) IPW_DEBUG(IPW_DL_QOS, f, ## a) 1485#define IPW_DEBUG_QOS(f, a...) IPW_LL_DEBUG(IPW_DL_QOS, f, ## a)
1482 1486
1483#include <linux/ctype.h> 1487#include <linux/ctype.h>
1484 1488
@@ -1947,10 +1951,17 @@ struct host_cmd {
1947 u32 *param; 1951 u32 *param;
1948} __attribute__ ((packed)); 1952} __attribute__ ((packed));
1949 1953
1954struct cmdlog_host_cmd {
1955 u8 cmd;
1956 u8 len;
1957 u16 reserved;
1958 char param[124];
1959} __attribute__ ((packed));
1960
1950struct ipw_cmd_log { 1961struct ipw_cmd_log {
1951 unsigned long jiffies; 1962 unsigned long jiffies;
1952 int retcode; 1963 int retcode;
1953 struct host_cmd cmd; 1964 struct cmdlog_host_cmd cmd;
1954}; 1965};
1955 1966
1956/* SysConfig command parameters ... */ 1967/* SysConfig command parameters ... */
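The ipw_cmd_log change above is about ownership: struct host_cmd carries a u32 *param pointer into a caller's buffer, so logging it would leave dangling references, whereas cmdlog_host_cmd embeds a 124-byte copy that stays valid for the life of the log entry. A minimal sketch of the same snapshot-into-a-ring idea; the names (cmd_snapshot, cmd_log, log_host_cmd, LOG_LEN) are illustrative, not from the driver:

	#define LOG_LEN 32

	struct cmd_snapshot {
		u8 cmd;
		u8 len;
		u16 reserved;
		char param[124];	/* inline copy, never a pointer */
	};

	static struct cmd_snapshot cmd_log[LOG_LEN];
	static unsigned int log_next;

	static void log_host_cmd(u8 cmd, const void *param, u8 len)
	{
		struct cmd_snapshot *e = &cmd_log[log_next++ % LOG_LEN];

		e->cmd = cmd;
		e->len = min_t(u8, len, sizeof(e->param));
		memcpy(e->param, param, e->len);	/* copy, don't alias */
	}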
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index d6ed5781b93a..1174ff53e025 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -82,6 +82,7 @@
82#include <linux/netdevice.h> 82#include <linux/netdevice.h>
83#include <linux/etherdevice.h> 83#include <linux/etherdevice.h>
84#include <linux/ethtool.h> 84#include <linux/ethtool.h>
85#include <linux/if_arp.h>
85#include <linux/wireless.h> 86#include <linux/wireless.h>
86#include <net/iw_handler.h> 87#include <net/iw_handler.h>
87#include <net/ieee80211.h> 88#include <net/ieee80211.h>
@@ -2875,7 +2876,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2875 if (orinoco_lock(priv, &flags) != 0) 2876 if (orinoco_lock(priv, &flags) != 0)
2876 return -EBUSY; 2877 return -EBUSY;
2877 2878
2878 if (erq->pointer) { 2879 if (erq->length > 0) {
2879 if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) 2880 if ((index < 0) || (index >= ORINOCO_MAX_KEYS))
2880 index = priv->tx_key; 2881 index = priv->tx_key;
2881 2882
@@ -2918,7 +2919,7 @@ static int orinoco_ioctl_setiwencode(struct net_device *dev,
2918 if (erq->flags & IW_ENCODE_RESTRICTED) 2919 if (erq->flags & IW_ENCODE_RESTRICTED)
2919 restricted = 1; 2920 restricted = 1;
2920 2921
2921 if (erq->pointer) { 2922 if (erq->pointer && erq->length > 0) {
2922 priv->keys[index].len = cpu_to_le16(xlen); 2923 priv->keys[index].len = cpu_to_le16(xlen);
2923 memset(priv->keys[index].data, 0, 2924 memset(priv->keys[index].data, 0,
2924 sizeof(priv->keys[index].data)); 2925 sizeof(priv->keys[index].data));
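Both orinoco hunks above stop keying off erq->pointer. By the time a SIOCSIWENCODE handler runs, any key bytes have already been copied into the kernel buffer passed as extra, and the raw user-space pointer value is not a trustworthy "key supplied" signal (the compat ioctl path in particular can make it misleading); erq->length is. A hedged sketch of the pattern, with an illustrative helper name:

	/* Decide "new key material?" from length, never from the pointer. */
	static int set_wep_key(struct iw_point *erq, char *extra,
			       u8 *key, int *key_len)
	{
		if (erq->length > 0) {		/* key bytes were copied in */
			if (erq->length > 13)	/* 104-bit WEP maximum */
				return -EINVAL;
			memcpy(key, extra, erq->length);
			*key_len = erq->length;
		}
		/* length == 0: only flags (index, open/restricted) change */
		return 0;
	}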
diff --git a/drivers/net/wireless/orinoco.h b/drivers/net/wireless/orinoco.h
index 16db3e14b7d2..fb5700d6c454 100644
--- a/drivers/net/wireless/orinoco.h
+++ b/drivers/net/wireless/orinoco.h
@@ -134,11 +134,7 @@ extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *reg
134/* Locking and synchronization functions */ 134/* Locking and synchronization functions */
135/********************************************************************/ 135/********************************************************************/
136 136
137/* These functions *must* be inline or they will break horribly on 137static inline int orinoco_lock(struct orinoco_private *priv,
138 * SPARC, due to its weird semantics for save/restore flags. extern
139 * inline should prevent the kernel from linking or module from
140 * loading if they are not inlined. */
141extern inline int orinoco_lock(struct orinoco_private *priv,
142 unsigned long *flags) 138 unsigned long *flags)
143{ 139{
144 spin_lock_irqsave(&priv->lock, *flags); 140 spin_lock_irqsave(&priv->lock, *flags);
@@ -151,7 +147,7 @@ extern inline int orinoco_lock(struct orinoco_private *priv,
151 return 0; 147 return 0;
152} 148}
153 149
154extern inline void orinoco_unlock(struct orinoco_private *priv, 150static inline void orinoco_unlock(struct orinoco_private *priv,
155 unsigned long *flags) 151 unsigned long *flags)
156{ 152{
157 spin_unlock_irqrestore(&priv->lock, *flags); 153 spin_unlock_irqrestore(&priv->lock, *flags);
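The orinoco.h hunk converts the lock helpers from extern inline to static inline. Under the GNU89 semantics the kernel was built with, extern inline emits no out-of-line definition at all, so any call the compiler declines to inline becomes an unresolved symbol at link time; the removed comment relied on that as a safety net, but it breaks the build whenever inlining heuristics change. static inline keeps the hint while always providing a file-local fallback body. A two-line illustration:

	/* GNU89: 'extern inline' emits no out-of-line body, so a
	 * non-inlined call fails to link; 'static inline' cannot. */
	static inline int example_trylock(spinlock_t *lock)
	{
		return spin_trylock(lock);
	}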
diff --git a/drivers/net/wireless/orinoco_nortel.c b/drivers/net/wireless/orinoco_nortel.c
index bf05b907747e..eaf3d13b851c 100644
--- a/drivers/net/wireless/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco_nortel.c
@@ -304,7 +304,7 @@ MODULE_LICENSE("Dual MPL/GPL");
304static int __init orinoco_nortel_init(void) 304static int __init orinoco_nortel_init(void)
305{ 305{
306 printk(KERN_DEBUG "%s\n", version); 306 printk(KERN_DEBUG "%s\n", version);
307 return pci_module_init(&orinoco_nortel_driver); 307 return pci_register_driver(&orinoco_nortel_driver);
308} 308}
309 309
310static void __exit orinoco_nortel_exit(void) 310static void __exit orinoco_nortel_exit(void)
diff --git a/drivers/net/wireless/orinoco_pci.c b/drivers/net/wireless/orinoco_pci.c
index 1759c543fbee..97a8b4ff32bd 100644
--- a/drivers/net/wireless/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco_pci.c
@@ -244,7 +244,7 @@ MODULE_LICENSE("Dual MPL/GPL");
244static int __init orinoco_pci_init(void) 244static int __init orinoco_pci_init(void)
245{ 245{
246 printk(KERN_DEBUG "%s\n", version); 246 printk(KERN_DEBUG "%s\n", version);
247 return pci_module_init(&orinoco_pci_driver); 247 return pci_register_driver(&orinoco_pci_driver);
248} 248}
249 249
250static void __exit orinoco_pci_exit(void) 250static void __exit orinoco_pci_exit(void)
diff --git a/drivers/net/wireless/orinoco_plx.c b/drivers/net/wireless/orinoco_plx.c
index 7f006f624171..31162ac25a92 100644
--- a/drivers/net/wireless/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco_plx.c
@@ -351,7 +351,7 @@ MODULE_LICENSE("Dual MPL/GPL");
351static int __init orinoco_plx_init(void) 351static int __init orinoco_plx_init(void)
352{ 352{
353 printk(KERN_DEBUG "%s\n", version); 353 printk(KERN_DEBUG "%s\n", version);
354 return pci_module_init(&orinoco_plx_driver); 354 return pci_register_driver(&orinoco_plx_driver);
355} 355}
356 356
357static void __exit orinoco_plx_exit(void) 357static void __exit orinoco_plx_exit(void)
diff --git a/drivers/net/wireless/orinoco_tmd.c b/drivers/net/wireless/orinoco_tmd.c
index 0831721e4d6c..7c7b960c91df 100644
--- a/drivers/net/wireless/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco_tmd.c
@@ -228,7 +228,7 @@ MODULE_LICENSE("Dual MPL/GPL");
228static int __init orinoco_tmd_init(void) 228static int __init orinoco_tmd_init(void)
229{ 229{
230 printk(KERN_DEBUG "%s\n", version); 230 printk(KERN_DEBUG "%s\n", version);
231 return pci_module_init(&orinoco_tmd_driver); 231 return pci_register_driver(&orinoco_tmd_driver);
232} 232}
233 233
234static void __exit orinoco_tmd_exit(void) 234static void __exit orinoco_tmd_exit(void)
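pci_module_init() was a thin compatibility wrapper around pci_register_driver() that was being removed from the tree, hence the mechanical one-line conversions across all the orinoco bus-glue modules above (and in prism54 below). The resulting init/exit pair is the standard shape; orinoco_nortel_driver here stands in for any of the converted drivers:

	static int __init orinoco_nortel_init(void)
	{
		/* returns 0 or a negative errno, which module
		 * init propagates directly */
		return pci_register_driver(&orinoco_nortel_driver);
	}

	static void __exit orinoco_nortel_exit(void)
	{
		pci_unregister_driver(&orinoco_nortel_driver);
	}

	module_init(orinoco_nortel_init);
	module_exit(orinoco_nortel_exit);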
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 989599ad33ef..c09fbf733b3a 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -35,13 +35,21 @@
35 35
36#include <net/iw_handler.h> /* New driver API */ 36#include <net/iw_handler.h> /* New driver API */
37 37
38#define KEY_SIZE_WEP104 13 /* 104/128-bit WEP keys */
39#define KEY_SIZE_WEP40 5 /* 40/64-bit WEP keys */
40/* KEY_SIZE_TKIP should match isl_oid.h, struct obj_key.key[] size */
41#define KEY_SIZE_TKIP 32 /* TKIP keys */
38 42
39static void prism54_wpa_ie_add(islpci_private *priv, u8 *bssid, 43static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
40 u8 *wpa_ie, size_t wpa_ie_len); 44 u8 *wpa_ie, size_t wpa_ie_len);
41static size_t prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie); 45static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie);
42static int prism54_set_wpa(struct net_device *, struct iw_request_info *, 46static int prism54_set_wpa(struct net_device *, struct iw_request_info *,
43 __u32 *, char *); 47 __u32 *, char *);
44 48
49/* In 500 kbps */
50static const unsigned char scan_rate_list[] = { 2, 4, 11, 22,
51 12, 18, 24, 36,
52 48, 72, 96, 108 };
45 53
46/** 54/**
47 * prism54_mib_mode_helper - MIB change mode helper function 55 * prism54_mib_mode_helper - MIB change mode helper function
@@ -468,6 +476,9 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
468 range->event_capa[1] = IW_EVENT_CAPA_K_1; 476 range->event_capa[1] = IW_EVENT_CAPA_K_1;
469 range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM); 477 range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);
470 478
479 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
480 IW_ENC_CAPA_CIPHER_TKIP;
481
471 if (islpci_get_state(priv) < PRV_STATE_INIT) 482 if (islpci_get_state(priv) < PRV_STATE_INIT)
472 return 0; 483 return 0;
473 484
@@ -567,6 +578,8 @@ prism54_translate_bss(struct net_device *ndev, char *current_ev,
567 struct iw_event iwe; /* Temporary buffer */ 578 struct iw_event iwe; /* Temporary buffer */
568 short cap; 579 short cap;
569 islpci_private *priv = netdev_priv(ndev); 580 islpci_private *priv = netdev_priv(ndev);
581 u8 wpa_ie[MAX_WPA_IE_LEN];
582 size_t wpa_ie_len;
570 583
571 /* The first entry must be the MAC address */ 584 /* The first entry must be the MAC address */
572 memcpy(iwe.u.ap_addr.sa_data, bss->address, 6); 585 memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
@@ -627,28 +640,40 @@ prism54_translate_bss(struct net_device *ndev, char *current_ev,
627 current_ev = 640 current_ev =
628 iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); 641 iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
629 642
630 if (priv->wpa) { 643 /* Add WPA/RSN Information Element, if any */
631 u8 wpa_ie[MAX_WPA_IE_LEN]; 644 wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie);
632 char *buf, *p; 645 if (wpa_ie_len > 0) {
633 size_t wpa_ie_len; 646 iwe.cmd = IWEVGENIE;
647 iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN);
648 current_ev = iwe_stream_add_point(current_ev, end_buf,
649 &iwe, wpa_ie);
650 }
651 /* Do the bitrates */
652 {
653 char * current_val = current_ev + IW_EV_LCP_LEN;
634 int i; 654 int i;
635 655 int mask;
636 wpa_ie_len = prism54_wpa_ie_get(priv, bss->address, wpa_ie); 656
637 if (wpa_ie_len > 0 && 657 iwe.cmd = SIOCGIWRATE;
638 (buf = kmalloc(wpa_ie_len * 2 + 10, GFP_ATOMIC))) { 658 /* Those two flags are ignored... */
639 p = buf; 659 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
640 p += sprintf(p, "wpa_ie="); 660
641 for (i = 0; i < wpa_ie_len; i++) { 661 /* Parse the bitmask */
642 p += sprintf(p, "%02x", wpa_ie[i]); 662 mask = 0x1;
663 for(i = 0; i < sizeof(scan_rate_list); i++) {
664 if(bss->rates & mask) {
665 iwe.u.bitrate.value = (scan_rate_list[i] * 500000);
666 current_val = iwe_stream_add_value(current_ev, current_val,
667 end_buf, &iwe,
668 IW_EV_PARAM_LEN);
643 } 669 }
644 memset(&iwe, 0, sizeof (iwe)); 670 mask <<= 1;
645 iwe.cmd = IWEVCUSTOM;
646 iwe.u.data.length = strlen(buf);
647 current_ev = iwe_stream_add_point(current_ev, end_buf,
648 &iwe, buf);
649 kfree(buf);
650 } 671 }
672 /* Check if we added any event */
673 if ((current_val - current_ev) > IW_EV_LCP_LEN)
674 current_ev = current_val;
651 } 675 }
676
652 return current_ev; 677 return current_ev;
653} 678}
654 679
@@ -1051,12 +1076,24 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
1051 current_index = r.u; 1076 current_index = r.u;
1052 /* Verify that the key is not marked as invalid */ 1077 /* Verify that the key is not marked as invalid */
1053 if (!(dwrq->flags & IW_ENCODE_NOKEY)) { 1078 if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
1054 key.length = dwrq->length > sizeof (key.key) ? 1079 if (dwrq->length > KEY_SIZE_TKIP) {
1055 sizeof (key.key) : dwrq->length; 1080 /* User-provided key data too big */
1056 memcpy(key.key, extra, key.length); 1081 return -EINVAL;
1057 if (key.length == 32) 1082 }
1058 /* we want WPA-PSK */ 1083 if (dwrq->length > KEY_SIZE_WEP104) {
1084 /* WPA-PSK TKIP */
1059 key.type = DOT11_PRIV_TKIP; 1085 key.type = DOT11_PRIV_TKIP;
1086 key.length = KEY_SIZE_TKIP;
1087 } else if (dwrq->length > KEY_SIZE_WEP40) {
1088 /* WEP 104/128 */
1089 key.length = KEY_SIZE_WEP104;
1090 } else {
1091 /* WEP 40/64 */
1092 key.length = KEY_SIZE_WEP40;
1093 }
1094 memset(key.key, 0, sizeof (key.key));
1095 memcpy(key.key, extra, dwrq->length);
1096
1060 if ((index < 0) || (index > 3)) 1097 if ((index < 0) || (index > 3))
1061 /* no index provided use the current one */ 1098 /* no index provided use the current one */
1062 index = current_index; 1099 index = current_index;
@@ -1210,6 +1247,489 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
1210 } 1247 }
1211} 1248}
1212 1249
1250static int prism54_set_genie(struct net_device *ndev,
1251 struct iw_request_info *info,
1252 struct iw_point *data, char *extra)
1253{
1254 islpci_private *priv = netdev_priv(ndev);
1255 int alen, ret = 0;
1256 struct obj_attachment *attach;
1257
1258 if (data->length > MAX_WPA_IE_LEN ||
1259 (data->length && extra == NULL))
1260 return -EINVAL;
1261
1262 memcpy(priv->wpa_ie, extra, data->length);
1263 priv->wpa_ie_len = data->length;
1264
1265 alen = sizeof(*attach) + priv->wpa_ie_len;
1266 attach = kzalloc(alen, GFP_KERNEL);
1267 if (attach == NULL)
1268 return -ENOMEM;
1269
1270#define WLAN_FC_TYPE_MGMT 0
1271#define WLAN_FC_STYPE_ASSOC_REQ 0
1272#define WLAN_FC_STYPE_REASSOC_REQ 2
1273
1274 /* Note: endianness is covered by mgt_set_varlen */
1275 attach->type = (WLAN_FC_TYPE_MGMT << 2) |
1276 (WLAN_FC_STYPE_ASSOC_REQ << 4);
1277 attach->id = -1;
1278 attach->size = priv->wpa_ie_len;
1279 memcpy(attach->data, extra, priv->wpa_ie_len);
1280
1281 ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach,
1282 priv->wpa_ie_len);
1283 if (ret == 0) {
1284 attach->type = (WLAN_FC_TYPE_MGMT << 2) |
1285 (WLAN_FC_STYPE_REASSOC_REQ << 4);
1286
1287 ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach,
1288 priv->wpa_ie_len);
1289 if (ret == 0)
1290 printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
1291 ndev->name);
1292 }
1293
1294 kfree(attach);
1295 return ret;
1296}
1297
1298
1299static int prism54_get_genie(struct net_device *ndev,
1300 struct iw_request_info *info,
1301 struct iw_point *data, char *extra)
1302{
1303 islpci_private *priv = netdev_priv(ndev);
1304 int len = priv->wpa_ie_len;
1305
1306 if (len <= 0) {
1307 data->length = 0;
1308 return 0;
1309 }
1310
1311 if (data->length < len)
1312 return -E2BIG;
1313
1314 data->length = len;
1315 memcpy(extra, priv->wpa_ie, len);
1316
1317 return 0;
1318}
1319
1320static int prism54_set_auth(struct net_device *ndev,
1321 struct iw_request_info *info,
1322 union iwreq_data *wrqu, char *extra)
1323{
1324 islpci_private *priv = netdev_priv(ndev);
1325 struct iw_param *param = &wrqu->param;
1326 u32 mlmelevel = 0, authen = 0, dot1x = 0;
1327 u32 exunencrypt = 0, privinvoked = 0, wpa = 0;
1328 u32 old_wpa;
1329 int ret = 0;
1330 union oid_res_t r;
1331
1332 if (islpci_get_state(priv) < PRV_STATE_INIT)
1333 return 0;
1334
1335 /* first get the flags */
1336 down_write(&priv->mib_sem);
1337 wpa = old_wpa = priv->wpa;
1338 up_write(&priv->mib_sem);
1339 ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
1340 authen = r.u;
1341 ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
1342 privinvoked = r.u;
1343 ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
1344 exunencrypt = r.u;
1345 ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r);
1346 dot1x = r.u;
1347 ret = mgt_get_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, NULL, &r);
1348 mlmelevel = r.u;
1349
1350 if (ret < 0)
1351 goto out;
1352
1353 switch (param->flags & IW_AUTH_INDEX) {
1354 case IW_AUTH_CIPHER_PAIRWISE:
1355 case IW_AUTH_CIPHER_GROUP:
1356 case IW_AUTH_KEY_MGMT:
1357 break;
1358
1359 case IW_AUTH_WPA_ENABLED:
1360 /* Do the same thing as IW_AUTH_WPA_VERSION */
1361 if (param->value) {
1362 wpa = 1;
1363 privinvoked = 1; /* For privacy invoked */
1364 exunencrypt = 1; /* Filter out all unencrypted frames */
1365 dot1x = 0x01; /* To enable eap filter */
1366 mlmelevel = DOT11_MLME_EXTENDED;
1367 authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
1368 } else {
1369 wpa = 0;
1370 privinvoked = 0;
1371 exunencrypt = 0; /* Do not filter un-encrypted data */
1372 dot1x = 0;
1373 mlmelevel = DOT11_MLME_AUTO;
1374 }
1375 break;
1376
1377 case IW_AUTH_WPA_VERSION:
1378 if (param->value & IW_AUTH_WPA_VERSION_DISABLED) {
1379 wpa = 0;
1380 privinvoked = 0;
1381 exunencrypt = 0; /* Do not filter un-encrypted data */
1382 dot1x = 0;
1383 mlmelevel = DOT11_MLME_AUTO;
1384 } else {
1385 if (param->value & IW_AUTH_WPA_VERSION_WPA)
1386 wpa = 1;
1387 else if (param->value & IW_AUTH_WPA_VERSION_WPA2)
1388 wpa = 2;
1389 privinvoked = 1; /* For privacy invoked */
1390 exunencrypt = 1; /* Filter out all unencrypted frames */
1391 dot1x = 0x01; /* To enable eap filter */
1392 mlmelevel = DOT11_MLME_EXTENDED;
1393 authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
1394 }
1395 break;
1396
1397 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
1398 dot1x = param->value ? 1 : 0;
1399 break;
1400
1401 case IW_AUTH_PRIVACY_INVOKED:
1402 privinvoked = param->value ? 1 : 0;
1403
1404 case IW_AUTH_DROP_UNENCRYPTED:
1405 exunencrypt = param->value ? 1 : 0;
1406 break;
1407
1408 case IW_AUTH_80211_AUTH_ALG:
1409 if (param->value & IW_AUTH_ALG_SHARED_KEY) {
1410 /* Only WEP uses _SK and _BOTH */
1411 if (wpa > 0) {
1412 ret = -EINVAL;
1413 goto out;
1414 }
1415 authen = DOT11_AUTH_SK;
1416 } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
1417 authen = DOT11_AUTH_OS;
1418 } else {
1419 ret = -EINVAL;
1420 goto out;
1421 }
1422 break;
1423
1424 default:
1425 return -EOPNOTSUPP;
1426 }
1427
1428 /* Set all the values */
1429 down_write(&priv->mib_sem);
1430 priv->wpa = wpa;
1431 up_write(&priv->mib_sem);
1432 mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
1433 mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &privinvoked);
1434 mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt);
1435 mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x);
1436 mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlmelevel);
1437
1438out:
1439 return ret;
1440}
1441
1442static int prism54_get_auth(struct net_device *ndev,
1443 struct iw_request_info *info,
1444 union iwreq_data *wrqu, char *extra)
1445{
1446 islpci_private *priv = netdev_priv(ndev);
1447 struct iw_param *param = &wrqu->param;
1448 u32 wpa = 0;
1449 int ret = 0;
1450 union oid_res_t r;
1451
1452 if (islpci_get_state(priv) < PRV_STATE_INIT)
1453 return 0;
1454
1455 /* first get the flags */
1456 down_write(&priv->mib_sem);
1457 wpa = priv->wpa;
1458 up_write(&priv->mib_sem);
1459
1460 switch (param->flags & IW_AUTH_INDEX) {
1461 case IW_AUTH_CIPHER_PAIRWISE:
1462 case IW_AUTH_CIPHER_GROUP:
1463 case IW_AUTH_KEY_MGMT:
1464 /*
1465 * wpa_supplicant will control these internally
1466 */
1467 ret = -EOPNOTSUPP;
1468 break;
1469
1470 case IW_AUTH_WPA_VERSION:
1471 switch (wpa) {
1472 case 1:
1473 param->value = IW_AUTH_WPA_VERSION_WPA;
1474 break;
1475 case 2:
1476 param->value = IW_AUTH_WPA_VERSION_WPA2;
1477 break;
1478 case 0:
1479 default:
1480 param->value = IW_AUTH_WPA_VERSION_DISABLED;
1481 break;
1482 }
1483 break;
1484
1485 case IW_AUTH_DROP_UNENCRYPTED:
1486 ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
1487 if (ret >= 0)
1488 param->value = r.u > 0 ? 1 : 0;
1489 break;
1490
1491 case IW_AUTH_80211_AUTH_ALG:
1492 ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
1493 if (ret >= 0) {
1494 switch (r.u) {
1495 case DOT11_AUTH_OS:
1496 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
1497 break;
1498 case DOT11_AUTH_BOTH:
1499 case DOT11_AUTH_SK:
 1500 param->value = IW_AUTH_ALG_SHARED_KEY;
 1501 break;
 1502 case DOT11_AUTH_NONE:
1502 default:
1503 param->value = 0;
1504 break;
1505 }
1506 }
1507 break;
1508
1509 case IW_AUTH_WPA_ENABLED:
1510 param->value = wpa > 0 ? 1 : 0;
1511 break;
1512
1513 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
1514 ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r);
1515 if (ret >= 0)
1516 param->value = r.u > 0 ? 1 : 0;
1517 break;
1518
1519 case IW_AUTH_PRIVACY_INVOKED:
1520 ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
1521 if (ret >= 0)
1522 param->value = r.u > 0 ? 1 : 0;
1523 break;
1524
1525 default:
1526 return -EOPNOTSUPP;
1527 }
1528 return ret;
1529}
1530
1531static int prism54_set_encodeext(struct net_device *ndev,
1532 struct iw_request_info *info,
1533 union iwreq_data *wrqu,
1534 char *extra)
1535{
1536 islpci_private *priv = netdev_priv(ndev);
1537 struct iw_point *encoding = &wrqu->encoding;
1538 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1539 int idx, alg = ext->alg, set_key = 1;
1540 union oid_res_t r;
1541 int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
1542 int ret = 0;
1543
1544 if (islpci_get_state(priv) < PRV_STATE_INIT)
1545 return 0;
1546
1547 /* Determine and validate the key index */
1548 idx = (encoding->flags & IW_ENCODE_INDEX) - 1;
1549 if (idx) {
1550 if (idx < 0 || idx > 3)
1551 return -EINVAL;
1552 } else {
1553 ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
1554 if (ret < 0)
1555 goto out;
1556 idx = r.u;
1557 }
1558
1559 if (encoding->flags & IW_ENCODE_DISABLED)
1560 alg = IW_ENCODE_ALG_NONE;
1561
1562 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
1563 /* Only set transmit key index here, actual
1564 * key is set below if needed.
1565 */
1566 ret = mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, &idx);
1567 set_key = ext->key_len > 0 ? 1 : 0;
1568 }
1569
1570 if (set_key) {
1571 struct obj_key key = { DOT11_PRIV_WEP, 0, "" };
1572 switch (alg) {
1573 case IW_ENCODE_ALG_NONE:
1574 break;
1575 case IW_ENCODE_ALG_WEP:
1576 if (ext->key_len > KEY_SIZE_WEP104) {
1577 ret = -EINVAL;
1578 goto out;
1579 }
1580 if (ext->key_len > KEY_SIZE_WEP40)
1581 key.length = KEY_SIZE_WEP104;
1582 else
1583 key.length = KEY_SIZE_WEP40;
1584 break;
1585 case IW_ENCODE_ALG_TKIP:
1586 if (ext->key_len > KEY_SIZE_TKIP) {
1587 ret = -EINVAL;
1588 goto out;
1589 }
1590 key.type = DOT11_PRIV_TKIP;
 1591 key.length = KEY_SIZE_TKIP;
 1592 break;
 1593 default:
1593 return -EINVAL;
1594 }
1595
1596 if (key.length) {
1597 memset(key.key, 0, sizeof(key.key));
1598 memcpy(key.key, ext->key, ext->key_len);
1599 ret = mgt_set_request(priv, DOT11_OID_DEFKEYX, idx,
1600 &key);
1601 if (ret < 0)
1602 goto out;
1603 }
1604 }
1605
1606 /* Read the flags */
1607 if (encoding->flags & IW_ENCODE_DISABLED) {
1608 /* Encoding disabled,
1609 * authen = DOT11_AUTH_OS;
1610 * invoke = 0;
1611 * exunencrypt = 0; */
1612 }
1613 if (encoding->flags & IW_ENCODE_OPEN) {
1614 /* Encode but accept non-encoded packets. No auth */
1615 invoke = 1;
1616 }
1617 if (encoding->flags & IW_ENCODE_RESTRICTED) {
1618 /* Refuse non-encoded packets. Auth */
1619 authen = DOT11_AUTH_BOTH;
1620 invoke = 1;
1621 exunencrypt = 1;
1622 }
1623
1624 /* do the change if requested */
1625 if (encoding->flags & IW_ENCODE_MODE) {
1626 ret = mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0,
1627 &authen);
1628 ret = mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0,
1629 &invoke);
1630 ret = mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
1631 &exunencrypt);
1632 }
1633
1634out:
1635 return ret;
1636}
1637
1638
1639static int prism54_get_encodeext(struct net_device *ndev,
1640 struct iw_request_info *info,
1641 union iwreq_data *wrqu,
1642 char *extra)
1643{
1644 islpci_private *priv = netdev_priv(ndev);
1645 struct iw_point *encoding = &wrqu->encoding;
1646 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1647 int idx, max_key_len;
1648 union oid_res_t r;
1649 int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0, wpa = 0;
1650 int ret = 0;
1651
1652 if (islpci_get_state(priv) < PRV_STATE_INIT)
1653 return 0;
1654
1655 /* first get the flags */
1656 ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
1657 authen = r.u;
1658 ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
1659 invoke = r.u;
1660 ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
1661 exunencrypt = r.u;
1662 if (ret < 0)
1663 goto out;
1664
1665 max_key_len = encoding->length - sizeof(*ext);
1666 if (max_key_len < 0)
1667 return -EINVAL;
1668
1669 idx = (encoding->flags & IW_ENCODE_INDEX) - 1;
1670 if (idx) {
1671 if (idx < 0 || idx > 3)
1672 return -EINVAL;
1673 } else {
1674 ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
1675 if (ret < 0)
1676 goto out;
1677 idx = r.u;
1678 }
1679
1680 encoding->flags = idx + 1;
1681 memset(ext, 0, sizeof(*ext));
1682
1683 switch (authen) {
1684 case DOT11_AUTH_BOTH:
1685 case DOT11_AUTH_SK:
 1686 wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
 1687 break;
 1688 case DOT11_AUTH_OS:
1688 default:
1689 wrqu->encoding.flags |= IW_ENCODE_OPEN;
1690 break;
1691 }
1692
1693 down_write(&priv->mib_sem);
1694 wpa = priv->wpa;
1695 up_write(&priv->mib_sem);
1696
1697 if (authen == DOT11_AUTH_OS && !exunencrypt && !invoke && !wpa) {
1698 /* No encryption */
1699 ext->alg = IW_ENCODE_ALG_NONE;
1700 ext->key_len = 0;
1701 wrqu->encoding.flags |= IW_ENCODE_DISABLED;
1702 } else {
1703 struct obj_key *key;
1704
1705 ret = mgt_get_request(priv, DOT11_OID_DEFKEYX, idx, NULL, &r);
1706 if (ret < 0)
1707 goto out;
1708 key = r.ptr;
1709 if (max_key_len < key->length) {
1710 ret = -E2BIG;
1711 goto out;
1712 }
1713 memcpy(ext->key, key->key, key->length);
1714 ext->key_len = key->length;
1715
1716 switch (key->type) {
1717 case DOT11_PRIV_TKIP:
1718 ext->alg = IW_ENCODE_ALG_TKIP;
1719 break;
1720 default:
1721 case DOT11_PRIV_WEP:
1722 ext->alg = IW_ENCODE_ALG_WEP;
1723 break;
1724 }
1725 wrqu->encoding.flags |= IW_ENCODE_ENABLED;
1726 }
1727
1728out:
1729 return ret;
1730}
1731
1732
1213static int 1733static int
1214prism54_reset(struct net_device *ndev, struct iw_request_info *info, 1734prism54_reset(struct net_device *ndev, struct iw_request_info *info,
1215 __u32 * uwrq, char *extra) 1735 __u32 * uwrq, char *extra)
@@ -1591,8 +2111,8 @@ static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
1591#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" 2111#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
1592 2112
1593static void 2113static void
1594prism54_wpa_ie_add(islpci_private *priv, u8 *bssid, 2114prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
1595 u8 *wpa_ie, size_t wpa_ie_len) 2115 u8 *wpa_ie, size_t wpa_ie_len)
1596{ 2116{
1597 struct list_head *ptr; 2117 struct list_head *ptr;
1598 struct islpci_bss_wpa_ie *bss = NULL; 2118 struct islpci_bss_wpa_ie *bss = NULL;
@@ -1658,7 +2178,7 @@ prism54_wpa_ie_add(islpci_private *priv, u8 *bssid,
1658} 2178}
1659 2179
1660static size_t 2180static size_t
1661prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie) 2181prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
1662{ 2182{
1663 struct list_head *ptr; 2183 struct list_head *ptr;
1664 struct islpci_bss_wpa_ie *bss = NULL; 2184 struct islpci_bss_wpa_ie *bss = NULL;
@@ -1683,14 +2203,14 @@ prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
1683} 2203}
1684 2204
1685void 2205void
1686prism54_wpa_ie_init(islpci_private *priv) 2206prism54_wpa_bss_ie_init(islpci_private *priv)
1687{ 2207{
1688 INIT_LIST_HEAD(&priv->bss_wpa_list); 2208 INIT_LIST_HEAD(&priv->bss_wpa_list);
1689 sema_init(&priv->wpa_sem, 1); 2209 sema_init(&priv->wpa_sem, 1);
1690} 2210}
1691 2211
1692void 2212void
1693prism54_wpa_ie_clean(islpci_private *priv) 2213prism54_wpa_bss_ie_clean(islpci_private *priv)
1694{ 2214{
1695 struct list_head *ptr, *n; 2215 struct list_head *ptr, *n;
1696 2216
@@ -1722,7 +2242,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr,
1722 } 2242 }
1723 if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 && 2243 if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 &&
1724 memcmp(pos + 2, wpa_oid, 4) == 0) { 2244 memcmp(pos + 2, wpa_oid, 4) == 0) {
1725 prism54_wpa_ie_add(priv, addr, pos, pos[1] + 2); 2245 prism54_wpa_bss_ie_add(priv, addr, pos, pos[1] + 2);
1726 return; 2246 return;
1727 } 2247 }
1728 pos += 2 + pos[1]; 2248 pos += 2 + pos[1];
@@ -1879,7 +2399,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
1879 send_formatted_event(priv, "Associate request (ex)", mlme, 1); 2399 send_formatted_event(priv, "Associate request (ex)", mlme, 1);
1880 2400
1881 if (priv->iw_mode != IW_MODE_MASTER 2401 if (priv->iw_mode != IW_MODE_MASTER
1882 && mlmeex->state != DOT11_STATE_AUTHING) 2402 && mlmeex->state != DOT11_STATE_ASSOCING)
1883 break; 2403 break;
1884 2404
1885 confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); 2405 confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
@@ -1893,7 +2413,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
1893 confirm->state = 0; /* not used */ 2413 confirm->state = 0; /* not used */
1894 confirm->code = 0; 2414 confirm->code = 0;
1895 2415
1896 wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie); 2416 wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
1897 2417
1898 if (!wpa_ie_len) { 2418 if (!wpa_ie_len) {
1899 printk(KERN_DEBUG "No WPA IE found from " 2419 printk(KERN_DEBUG "No WPA IE found from "
@@ -1937,7 +2457,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
1937 confirm->state = 0; /* not used */ 2457 confirm->state = 0; /* not used */
1938 confirm->code = 0; 2458 confirm->code = 0;
1939 2459
1940 wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie); 2460 wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
1941 2461
1942 if (!wpa_ie_len) { 2462 if (!wpa_ie_len) {
1943 printk(KERN_DEBUG "No WPA IE found from " 2463 printk(KERN_DEBUG "No WPA IE found from "
@@ -2553,6 +3073,15 @@ static const iw_handler prism54_handler[] = {
2553 (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */ 3073 (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */
2554 (iw_handler) NULL, /* SIOCSIWPOWER */ 3074 (iw_handler) NULL, /* SIOCSIWPOWER */
2555 (iw_handler) NULL, /* SIOCGIWPOWER */ 3075 (iw_handler) NULL, /* SIOCGIWPOWER */
3076 NULL, /* -- hole -- */
3077 NULL, /* -- hole -- */
3078 (iw_handler) prism54_set_genie, /* SIOCSIWGENIE */
3079 (iw_handler) prism54_get_genie, /* SIOCGIWGENIE */
3080 (iw_handler) prism54_set_auth, /* SIOCSIWAUTH */
3081 (iw_handler) prism54_get_auth, /* SIOCGIWAUTH */
3082 (iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */
3083 (iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */
3084 NULL, /* SIOCSIWPMKSA */
2556}; 3085};
2557 3086
 2558 /* The low order bit identifies a SET (0) or a GET (1) ioctl. */ 3087 /* The low order bit identifies a SET (0) or a GET (1) ioctl. */
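The rewritten prism54_set_encode above (and set_encodeext) infers the cipher from the key length rather than copying whatever arrives: anything longer than a 104-bit WEP key must be the 32-byte TKIP size, and shorter WEP keys are zero-padded up to the next standard length. The classification in isolation, as a hedged sketch (classify_key is our name; the KEY_SIZE_* constants and DOT11_PRIV_* types are the driver's):

	/* Pick key type and padded length from the raw user length. */
	static int classify_key(size_t raw_len, u8 *type, u8 *len)
	{
		if (raw_len > KEY_SIZE_TKIP)
			return -EINVAL;		/* larger than any valid key */
		if (raw_len > KEY_SIZE_WEP104) {
			*type = DOT11_PRIV_TKIP;
			*len = KEY_SIZE_TKIP;	/* WPA-PSK TKIP */
		} else if (raw_len > KEY_SIZE_WEP40) {
			*type = DOT11_PRIV_WEP;
			*len = KEY_SIZE_WEP104;	/* WEP 104/128, zero-padded */
		} else {
			*type = DOT11_PRIV_WEP;
			*len = KEY_SIZE_WEP40;	/* WEP 40/64, zero-padded */
		}
		return 0;
	}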
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index 46d5cde80c85..65f33acd0a42 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -27,7 +27,7 @@
27 27
28#include <net/iw_handler.h> /* New driver API */ 28#include <net/iw_handler.h> /* New driver API */
29 29
30#define SUPPORTED_WIRELESS_EXT 16 30#define SUPPORTED_WIRELESS_EXT 19
31 31
32void prism54_mib_init(islpci_private *); 32void prism54_mib_init(islpci_private *);
33 33
@@ -39,8 +39,8 @@ void prism54_acl_clean(struct islpci_acl *);
39 39
40void prism54_process_trap(void *); 40void prism54_process_trap(void *);
41 41
42void prism54_wpa_ie_init(islpci_private *priv); 42void prism54_wpa_bss_ie_init(islpci_private *priv);
43void prism54_wpa_ie_clean(islpci_private *priv); 43void prism54_wpa_bss_ie_clean(islpci_private *priv);
44 44
45int prism54_set_mac_address(struct net_device *, void *); 45int prism54_set_mac_address(struct net_device *, void *);
46 46
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5ddf29599032..ab3c5a27efd9 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -715,7 +715,7 @@ islpci_alloc_memory(islpci_private *priv)
715 } 715 }
716 716
717 prism54_acl_init(&priv->acl); 717 prism54_acl_init(&priv->acl);
718 prism54_wpa_ie_init(priv); 718 prism54_wpa_bss_ie_init(priv);
719 if (mgt_init(priv)) 719 if (mgt_init(priv))
720 goto out_free; 720 goto out_free;
721 721
@@ -774,7 +774,7 @@ islpci_free_memory(islpci_private *priv)
774 774
 775 /* Free the access control list and the WPA list */ 775 /* Free the access control list and the WPA list */
776 prism54_acl_clean(&priv->acl); 776 prism54_acl_clean(&priv->acl);
777 prism54_wpa_ie_clean(priv); 777 prism54_wpa_bss_ie_clean(priv);
778 mgt_clean(priv); 778 mgt_clean(priv);
779 779
780 return 0; 780 return 0;
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index 07053165e4c5..5049f37455b1 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -179,6 +179,8 @@ typedef struct {
179 struct list_head bss_wpa_list; 179 struct list_head bss_wpa_list;
180 int num_bss_wpa; 180 int num_bss_wpa;
181 struct semaphore wpa_sem; 181 struct semaphore wpa_sem;
182 u8 wpa_ie[MAX_WPA_IE_LEN];
183 size_t wpa_ie_len;
182 184
183 struct work_struct reset_task; 185 struct work_struct reset_task;
184 int reset_task_pending; 186 int reset_task_pending;
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 09fc17a0f029..f692dccf0d07 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -313,7 +313,7 @@ prism54_module_init(void)
313 313
314 __bug_on_wrong_struct_sizes (); 314 __bug_on_wrong_struct_sizes ();
315 315
316 return pci_module_init(&prism54_driver); 316 return pci_register_driver(&prism54_driver);
317} 317}
318 318
319/* by the time prism54_module_exit() terminates, as a postcondition 319/* by the time prism54_module_exit() terminates, as a postcondition
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 61b83a5e737a..8e112d139e29 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -52,8 +52,8 @@
52#include <pcmcia/ds.h> 52#include <pcmcia/ds.h>
53#include <pcmcia/mem_op.h> 53#include <pcmcia/mem_op.h>
54 54
55#include <net/ieee80211.h>
56#include <linux/wireless.h> 55#include <linux/wireless.h>
56#include <net/iw_handler.h>
57 57
58#include <asm/io.h> 58#include <asm/io.h>
59#include <asm/system.h> 59#include <asm/system.h>
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 7f78b7801fb3..bcc7038130f6 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -242,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle)
242 u_int save_cor; 242 u_int save_cor;
243 243
244 /* Doing it if hardware is gone is guaranteed crash */ 244 /* Doing it if hardware is gone is guaranteed crash */
245 if (pcmcia_dev_present(link)) 245 if (!pcmcia_dev_present(link))
246 return -ENODEV; 246 return -ENODEV;
247 247
248 /* Save original COR value */ 248 /* Save original COR value */
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index fd31885c6844..ccaf28e8db0a 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -467,6 +467,7 @@ static int arp_query(unsigned char *haddr, u32 paddr,
467 struct net_device *dev) 467 struct net_device *dev)
468{ 468{
469 struct neighbour *neighbor_entry; 469 struct neighbour *neighbor_entry;
470 int ret = 0;
470 471
471 neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev); 472 neighbor_entry = neigh_lookup(&arp_tbl, &paddr, dev);
472 473
@@ -474,10 +475,11 @@ static int arp_query(unsigned char *haddr, u32 paddr,
474 neighbor_entry->used = jiffies; 475 neighbor_entry->used = jiffies;
475 if (neighbor_entry->nud_state & NUD_VALID) { 476 if (neighbor_entry->nud_state & NUD_VALID) {
476 memcpy(haddr, neighbor_entry->ha, dev->addr_len); 477 memcpy(haddr, neighbor_entry->ha, dev->addr_len);
477 return 1; 478 ret = 1;
478 } 479 }
480 neigh_release(neighbor_entry);
479 } 481 }
480 return 0; 482 return ret;
481} 483}
482 484
483static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr, 485static void DumpData(char *msg, struct strip *strip_info, __u8 * ptr,
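The strip.c change is a reference-count fix: neigh_lookup() returns the neighbour with its refcount raised, and the old early return 1 skipped neigh_release(), leaking one reference per successful lookup. The single-exit shape keeps acquire and release paired, per this hedged restatement of the pattern:

	/* Every successful neigh_lookup() must be matched by
	 * neigh_release(), whatever the query's outcome. */
	static int query_hwaddr(struct net_device *dev, u32 paddr, u8 *haddr)
	{
		struct neighbour *n;
		int ret = 0;

		n = neigh_lookup(&arp_tbl, &paddr, dev);  /* takes a ref */
		if (n) {
			if (n->nud_state & NUD_VALID) {
				memcpy(haddr, n->ha, dev->addr_len);
				ret = 1;
			}
			neigh_release(n);                 /* always drop it */
		}
		return ret;
	}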
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 662ecc8a33ff..c52e9bcf8d02 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1820,6 +1820,8 @@ static int zd1201_probe(struct usb_interface *interface,
1820 zd->dev->name); 1820 zd->dev->name);
1821 1821
1822 usb_set_intfdata(interface, zd); 1822 usb_set_intfdata(interface, zd);
1823 zd1201_enable(zd); /* zd1201 likes to startup enabled, */
1824 zd1201_disable(zd); /* interfering with all the wifis in range */
1823 return 0; 1825 return 0;
1824 1826
1825err_net: 1827err_net:
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index 500314fc74d2..6603ad5be63d 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_ZD1211RW) += zd1211rw.o
3zd1211rw-objs := zd_chip.o zd_ieee80211.o \ 3zd1211rw-objs := zd_chip.o zd_ieee80211.o \
4 zd_mac.o zd_netdev.o \ 4 zd_mac.o zd_netdev.o \
5 zd_rf_al2230.o zd_rf_rf2959.o \ 5 zd_rf_al2230.o zd_rf_rf2959.o \
6 zd_rf_al7230b.o \
6 zd_rf.o zd_usb.o zd_util.o 7 zd_rf.o zd_usb.o zd_util.o
7 8
8ifeq ($(CONFIG_ZD1211RW_DEBUG),y) 9ifeq ($(CONFIG_ZD1211RW_DEBUG),y)
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index efc9c4bd826f..7c4e32cf0d47 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -42,12 +42,11 @@ void zd_chip_init(struct zd_chip *chip,
42 42
43void zd_chip_clear(struct zd_chip *chip) 43void zd_chip_clear(struct zd_chip *chip)
44{ 44{
45 mutex_lock(&chip->mutex); 45 ZD_ASSERT(!mutex_is_locked(&chip->mutex));
46 zd_usb_clear(&chip->usb); 46 zd_usb_clear(&chip->usb);
47 zd_rf_clear(&chip->rf); 47 zd_rf_clear(&chip->rf);
48 mutex_unlock(&chip->mutex);
49 mutex_destroy(&chip->mutex); 48 mutex_destroy(&chip->mutex);
50 memset(chip, 0, sizeof(*chip)); 49 ZD_MEMCLEAR(chip, sizeof(*chip));
51} 50}
52 51
53static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size) 52static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size)
@@ -68,10 +67,11 @@ static int scnprint_id(struct zd_chip *chip, char *buffer, size_t size)
68 i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i); 67 i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i);
69 i += scnprintf(buffer+i, size-i, " "); 68 i += scnprintf(buffer+i, size-i, " ");
70 i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i); 69 i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i);
71 i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c", chip->pa_type, 70 i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c%c", chip->pa_type,
72 chip->patch_cck_gain ? 'g' : '-', 71 chip->patch_cck_gain ? 'g' : '-',
73 chip->patch_cr157 ? '7' : '-', 72 chip->patch_cr157 ? '7' : '-',
74 chip->patch_6m_band_edge ? '6' : '-'); 73 chip->patch_6m_band_edge ? '6' : '-',
74 chip->new_phy_layout ? 'N' : '-');
75 return i; 75 return i;
76} 76}
77 77
@@ -330,13 +330,14 @@ static int read_pod(struct zd_chip *chip, u8 *rf_type)
330 chip->patch_cck_gain = (value >> 8) & 0x1; 330 chip->patch_cck_gain = (value >> 8) & 0x1;
331 chip->patch_cr157 = (value >> 13) & 0x1; 331 chip->patch_cr157 = (value >> 13) & 0x1;
332 chip->patch_6m_band_edge = (value >> 21) & 0x1; 332 chip->patch_6m_band_edge = (value >> 21) & 0x1;
333 chip->new_phy_layout = (value >> 31) & 0x1;
333 334
334 dev_dbg_f(zd_chip_dev(chip), 335 dev_dbg_f(zd_chip_dev(chip),
335 "RF %s %#01x PA type %#01x patch CCK %d patch CR157 %d " 336 "RF %s %#01x PA type %#01x patch CCK %d patch CR157 %d "
336 "patch 6M %d\n", 337 "patch 6M %d new PHY %d\n",
337 zd_rf_name(*rf_type), *rf_type, 338 zd_rf_name(*rf_type), *rf_type,
338 chip->pa_type, chip->patch_cck_gain, 339 chip->pa_type, chip->patch_cck_gain,
339 chip->patch_cr157, chip->patch_6m_band_edge); 340 chip->patch_cr157, chip->patch_6m_band_edge, chip->new_phy_layout);
340 return 0; 341 return 0;
341error: 342error:
342 *rf_type = 0; 343 *rf_type = 0;
@@ -344,6 +345,7 @@ error:
344 chip->patch_cck_gain = 0; 345 chip->patch_cck_gain = 0;
345 chip->patch_cr157 = 0; 346 chip->patch_cr157 = 0;
346 chip->patch_6m_band_edge = 0; 347 chip->patch_6m_band_edge = 0;
348 chip->new_phy_layout = 0;
347 return r; 349 return r;
348} 350}
349 351
@@ -717,7 +719,7 @@ static int zd1211b_hw_reset_phy(struct zd_chip *chip)
717 { CR21, 0x0e }, { CR22, 0x23 }, { CR23, 0x90 }, 719 { CR21, 0x0e }, { CR22, 0x23 }, { CR23, 0x90 },
718 { CR24, 0x14 }, { CR25, 0x40 }, { CR26, 0x10 }, 720 { CR24, 0x14 }, { CR25, 0x40 }, { CR26, 0x10 },
719 { CR27, 0x10 }, { CR28, 0x7f }, { CR29, 0x80 }, 721 { CR27, 0x10 }, { CR28, 0x7f }, { CR29, 0x80 },
720 { CR30, 0x49 }, /* jointly decoder, no ASIC */ 722 { CR30, 0x4b }, /* ASIC/FWT, no jointly decoder */
721 { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 }, 723 { CR31, 0x60 }, { CR32, 0x43 }, { CR33, 0x08 },
722 { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 }, 724 { CR34, 0x06 }, { CR35, 0x0a }, { CR36, 0x00 },
723 { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c }, 725 { CR37, 0x00 }, { CR38, 0x38 }, { CR39, 0x0c },
@@ -797,7 +799,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip)
797 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 }, 799 { CR_ADDA_MBIAS_WARMTIME, 0x30000808 },
798 { CR_ZD1211_RETRY_MAX, 0x2 }, 800 { CR_ZD1211_RETRY_MAX, 0x2 },
799 { CR_SNIFFER_ON, 0 }, 801 { CR_SNIFFER_ON, 0 },
800 { CR_RX_FILTER, AP_RX_FILTER }, 802 { CR_RX_FILTER, STA_RX_FILTER },
801 { CR_GROUP_HASH_P1, 0x00 }, 803 { CR_GROUP_HASH_P1, 0x00 },
802 { CR_GROUP_HASH_P2, 0x80000000 }, 804 { CR_GROUP_HASH_P2, 0x80000000 },
803 { CR_REG1, 0xa4 }, 805 { CR_REG1, 0xa4 },
@@ -807,7 +809,6 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip)
807 { CR_ACK_TIMEOUT_EXT, 0x80 }, 809 { CR_ACK_TIMEOUT_EXT, 0x80 },
808 { CR_ADDA_PWR_DWN, 0x00 }, 810 { CR_ADDA_PWR_DWN, 0x00 },
809 { CR_ACK_TIME_80211, 0x100 }, 811 { CR_ACK_TIME_80211, 0x100 },
810 { CR_IFS_VALUE, 0x547c032 },
811 { CR_RX_PE_DELAY, 0x70 }, 812 { CR_RX_PE_DELAY, 0x70 },
812 { CR_PS_CTRL, 0x10000000 }, 813 { CR_PS_CTRL, 0x10000000 },
813 { CR_RTS_CTS_RATE, 0x02030203 }, 814 { CR_RTS_CTS_RATE, 0x02030203 },
@@ -844,7 +845,7 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
844 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 845 { CR_ZD1211B_AIFS_CTL2, 0x008C003C },
845 { CR_ZD1211B_TXOP, 0x01800824 }, 846 { CR_ZD1211B_TXOP, 0x01800824 },
846 { CR_SNIFFER_ON, 0 }, 847 { CR_SNIFFER_ON, 0 },
847 { CR_RX_FILTER, AP_RX_FILTER }, 848 { CR_RX_FILTER, STA_RX_FILTER },
848 { CR_GROUP_HASH_P1, 0x00 }, 849 { CR_GROUP_HASH_P1, 0x00 },
849 { CR_GROUP_HASH_P2, 0x80000000 }, 850 { CR_GROUP_HASH_P2, 0x80000000 },
850 { CR_REG1, 0xa4 }, 851 { CR_REG1, 0xa4 },
@@ -854,11 +855,10 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
854 { CR_ACK_TIMEOUT_EXT, 0x80 }, 855 { CR_ACK_TIMEOUT_EXT, 0x80 },
855 { CR_ADDA_PWR_DWN, 0x00 }, 856 { CR_ADDA_PWR_DWN, 0x00 },
856 { CR_ACK_TIME_80211, 0x100 }, 857 { CR_ACK_TIME_80211, 0x100 },
857 { CR_IFS_VALUE, 0x547c032 },
858 { CR_RX_PE_DELAY, 0x70 }, 858 { CR_RX_PE_DELAY, 0x70 },
859 { CR_PS_CTRL, 0x10000000 }, 859 { CR_PS_CTRL, 0x10000000 },
860 { CR_RTS_CTS_RATE, 0x02030203 }, 860 { CR_RTS_CTS_RATE, 0x02030203 },
861 { CR_RX_THRESHOLD, 0x000c0640 }, 861 { CR_RX_THRESHOLD, 0x000c0eff, },
862 { CR_AFTER_PNP, 0x1 }, 862 { CR_AFTER_PNP, 0x1 },
863 { CR_WEP_PROTECT, 0x114 }, 863 { CR_WEP_PROTECT, 0x114 },
864 }; 864 };
@@ -970,10 +970,15 @@ static int hw_init(struct zd_chip *chip)
970 r = hw_init_hmac(chip); 970 r = hw_init_hmac(chip);
971 if (r) 971 if (r)
972 return r; 972 return r;
973 r = set_beacon_interval(chip, 100); 973
974 /* Although the vendor driver defaults to a different value during
975 * init, it overwrites the IFS value with the following every time
976 * the channel changes. We should aim to be more intelligent... */
977 r = zd_iowrite32_locked(chip, IFS_VALUE_DEFAULT, CR_IFS_VALUE);
974 if (r) 978 if (r)
975 return r; 979 return r;
976 return 0; 980
981 return set_beacon_interval(chip, 100);
977} 982}
978 983
979#ifdef DEBUG 984#ifdef DEBUG
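The replacement write above relies on the IFS_VALUE_* constants added to zd_chip.h later in this patch: CR_IFS_VALUE packs DIFS, EIFS and SIFS into one 32-bit register at bit offsets 0, 12 and 24, and IFS_VALUE_DEFAULT encodes DIFS=50, EIFS=1148, SIFS=10. A worked check of the packing (ifs_pack is an illustrative helper, not driver code):

	static u32 ifs_pack(u32 difs, u32 eifs, u32 sifs)
	{
		return (difs << IFS_VALUE_DIFS_SH) |
		       (eifs << IFS_VALUE_EIFS_SH) |
		       (sifs << IFS_VALUE_SIFS_SH);
	}

	/* ifs_pack(50, 1148, 10) == 0x0a47c032 == IFS_VALUE_DEFAULT */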
@@ -1430,9 +1435,43 @@ static int ofdm_qual_db(u8 status_quality, u8 rate, unsigned int size)
1430 break; 1435 break;
1431 } 1436 }
1432 1437
1438 switch (rate) {
1439 case ZD_OFDM_RATE_6M:
1440 case ZD_OFDM_RATE_9M:
1441 i += 3;
1442 break;
1443 case ZD_OFDM_RATE_12M:
1444 case ZD_OFDM_RATE_18M:
1445 i += 5;
1446 break;
1447 case ZD_OFDM_RATE_24M:
1448 case ZD_OFDM_RATE_36M:
1449 i += 9;
1450 break;
1451 case ZD_OFDM_RATE_48M:
1452 case ZD_OFDM_RATE_54M:
1453 i += 15;
1454 break;
1455 default:
1456 return -EINVAL;
1457 }
1458
1433 return i; 1459 return i;
1434} 1460}
1435 1461
1462static int ofdm_qual_percent(u8 status_quality, u8 rate, unsigned int size)
1463{
1464 int r;
1465
1466 r = ofdm_qual_db(status_quality, rate, size);
1467 ZD_ASSERT(r >= 0);
1468 if (r < 0)
1469 r = 0;
1470
1471 r = (r * 100)/29;
1472 return r <= 100 ? r : 100;
1473}
1474
1436static unsigned int log10times100(unsigned int x) 1475static unsigned int log10times100(unsigned int x)
1437{ 1476{
1438 static const u8 log10[] = { 1477 static const u8 log10[] = {
@@ -1476,31 +1515,28 @@ static int cck_snr_db(u8 status_quality)
1476 return r; 1515 return r;
1477} 1516}
1478 1517
1479static int rx_qual_db(const void *rx_frame, unsigned int size, 1518static int cck_qual_percent(u8 status_quality)
1480 const struct rx_status *status)
1481{ 1519{
1482 return (status->frame_status&ZD_RX_OFDM) ? 1520 int r;
1483 ofdm_qual_db(status->signal_quality_ofdm, 1521
1484 zd_ofdm_plcp_header_rate(rx_frame), 1522 r = cck_snr_db(status_quality);
1485 size) : 1523 r = (100*r)/17;
1486 cck_snr_db(status->signal_quality_cck); 1524 return r <= 100 ? r : 100;
1487} 1525}
1488 1526
1489u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size, 1527u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
1490 const struct rx_status *status) 1528 const struct rx_status *status)
1491{ 1529{
1492 int r = rx_qual_db(rx_frame, size, status); 1530 return (status->frame_status&ZD_RX_OFDM) ?
1493 if (r < 0) 1531 ofdm_qual_percent(status->signal_quality_ofdm,
1494 r = 0; 1532 zd_ofdm_plcp_header_rate(rx_frame),
1495 r = (r * 100) / 14; 1533 size) :
1496 if (r > 100) 1534 cck_qual_percent(status->signal_quality_cck);
1497 r = 100;
1498 return r;
1499} 1535}
1500 1536
1501u8 zd_rx_strength_percent(u8 rssi) 1537u8 zd_rx_strength_percent(u8 rssi)
1502{ 1538{
1503 int r = (rssi*100) / 30; 1539 int r = (rssi*100) / 41;
1504 if (r > 100) 1540 if (r > 100)
1505 r = 100; 1541 r = 100;
1506 return (u8) r; 1542 return (u8) r;
@@ -1613,3 +1649,34 @@ int zd_rfwritev_locked(struct zd_chip *chip,
1613 1649
1614 return 0; 1650 return 0;
1615} 1651}
1652
1653/*
1654 * We can optionally program the RF directly through CR regs, if supported by
1655 * the hardware. This is much faster than the older method.
1656 */
1657int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value)
1658{
1659 struct zd_ioreq16 ioreqs[] = {
1660 { CR244, (value >> 16) & 0xff },
1661 { CR243, (value >> 8) & 0xff },
1662 { CR242, value & 0xff },
1663 };
1664 ZD_ASSERT(mutex_is_locked(&chip->mutex));
1665 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
1666}
1667
1668int zd_rfwritev_cr_locked(struct zd_chip *chip,
1669 const u32 *values, unsigned int count)
1670{
1671 int r;
1672 unsigned int i;
1673
1674 for (i = 0; i < count; i++) {
1675 r = zd_rfwrite_cr_locked(chip, values[i]);
1676 if (r)
1677 return r;
1678 }
1679
1680 return 0;
1681}
1682
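zd_rfwrite_cr_locked() above splits a 24-bit RF word across CR244..CR242, high byte first, and zd_rfwritev_cr_locked() streams a whole table of such words; on hardware that supports the CR path this replaces the much slower bit-banged zd_rfwritev_locked(). A hedged usage sketch; the function name and register values below are placeholders, not real channel settings:

	static int example_rf_set_channel_cr(struct zd_chip *chip, u8 channel)
	{
		/* a real RF driver would index a per-channel table here */
		static const u32 rv[] = {
			0x03f790,	/* placeholder RF word 1 */
			0x033331,	/* placeholder RF word 2 */
			0x00b3b3,	/* placeholder RF word 3 */
		};

		/* caller holds chip->mutex, as the _locked suffix requires */
		return zd_rfwritev_cr_locked(chip, rv, ARRAY_SIZE(rv));
	}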
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 805121093ab5..4b1250859897 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -461,14 +461,27 @@
461 461
462#define CR_RX_FILTER CTL_REG(0x068c) 462#define CR_RX_FILTER CTL_REG(0x068c)
463#define RX_FILTER_ASSOC_RESPONSE 0x0002 463#define RX_FILTER_ASSOC_RESPONSE 0x0002
464#define RX_FILTER_REASSOC_RESPONSE 0x0008
464#define RX_FILTER_PROBE_RESPONSE 0x0020 465#define RX_FILTER_PROBE_RESPONSE 0x0020
465#define RX_FILTER_BEACON 0x0100 466#define RX_FILTER_BEACON 0x0100
467#define RX_FILTER_DISASSOC 0x0400
466#define RX_FILTER_AUTH 0x0800 468#define RX_FILTER_AUTH 0x0800
467/* Sniff modus sets filter to 0xfffff */ 469#define AP_RX_FILTER 0x0400feff
470#define STA_RX_FILTER 0x0000ffff
471
472/* Monitor mode sets filter to 0xfffff */
468 473
469#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) 474#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690)
470#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694) 475#define CR_BCN_FIFO_SEMAPHORE CTL_REG(0x0694)
476
471#define CR_IFS_VALUE CTL_REG(0x0698) 477#define CR_IFS_VALUE CTL_REG(0x0698)
478#define IFS_VALUE_DIFS_SH 0
479#define IFS_VALUE_EIFS_SH 12
480#define IFS_VALUE_SIFS_SH 24
481#define IFS_VALUE_DEFAULT (( 50 << IFS_VALUE_DIFS_SH) | \
482 (1148 << IFS_VALUE_EIFS_SH) | \
483 ( 10 << IFS_VALUE_SIFS_SH))
484
472#define CR_RX_TIME_OUT CTL_REG(0x069C) 485#define CR_RX_TIME_OUT CTL_REG(0x069C)
473#define CR_TOTAL_RX_FRM CTL_REG(0x06A0) 486#define CR_TOTAL_RX_FRM CTL_REG(0x06A0)
474#define CR_CRC32_CNT CTL_REG(0x06A4) 487#define CR_CRC32_CNT CTL_REG(0x06A4)
@@ -546,9 +559,6 @@
546#define CR_ZD1211B_TXOP CTL_REG(0x0b20) 559#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
547#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) 560#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
548 561
549#define AP_RX_FILTER 0x0400feff
550#define STA_RX_FILTER 0x0000ffff
551
552#define CWIN_SIZE 0x007f043f 562#define CWIN_SIZE 0x007f043f
553 563
554 564
@@ -628,6 +638,7 @@ enum {
628 LOAD_CODE_SIZE = 0xe, /* words */ 638 LOAD_CODE_SIZE = 0xe, /* words */
629 LOAD_VECT_SIZE = 0x10000 - 0xfff7, /* words */ 639 LOAD_VECT_SIZE = 0x10000 - 0xfff7, /* words */
630 EEPROM_REGS_OFFSET = LOAD_CODE_SIZE + LOAD_VECT_SIZE, 640 EEPROM_REGS_OFFSET = LOAD_CODE_SIZE + LOAD_VECT_SIZE,
641 EEPROM_REGS_SIZE = 0x7e, /* words */
631 E2P_BASE_OFFSET = EEPROM_START_OFFSET + 642 E2P_BASE_OFFSET = EEPROM_START_OFFSET +
632 EEPROM_REGS_OFFSET, 643 EEPROM_REGS_OFFSET,
633}; 644};
@@ -653,7 +664,7 @@ struct zd_chip {
653 /* SetPointOFDM in the vendor driver */ 664 /* SetPointOFDM in the vendor driver */
654 u8 ofdm_cal_values[3][E2P_CHANNEL_COUNT]; 665 u8 ofdm_cal_values[3][E2P_CHANNEL_COUNT];
655 u8 pa_type:4, patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1, 666 u8 pa_type:4, patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1,
656 is_zd1211b:1; 667 new_phy_layout:1, is_zd1211b:1;
657}; 668};
658 669
659static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb) 670static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb)
@@ -737,8 +748,12 @@ static inline int zd_rfwrite_locked(struct zd_chip *chip, u32 value, u8 bits)
737 return zd_usb_rfwrite(&chip->usb, value, bits); 748 return zd_usb_rfwrite(&chip->usb, value, bits);
738} 749}
739 750
751int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value);
752
740int zd_rfwritev_locked(struct zd_chip *chip, 753int zd_rfwritev_locked(struct zd_chip *chip,
741 const u32* values, unsigned int count, u8 bits); 754 const u32* values, unsigned int count, u8 bits);
755int zd_rfwritev_cr_locked(struct zd_chip *chip,
756 const u32* values, unsigned int count);
742 757
743/* Locking functions for reading and writing registers. 758/* Locking functions for reading and writing registers.
744 * The different parameters are intentional. 759 * The different parameters are intentional.
diff --git a/drivers/net/wireless/zd1211rw/zd_def.h b/drivers/net/wireless/zd1211rw/zd_def.h
index 465906812fc4..a13ec72eb304 100644
--- a/drivers/net/wireless/zd1211rw/zd_def.h
+++ b/drivers/net/wireless/zd1211rw/zd_def.h
@@ -45,4 +45,10 @@ do { \
45# define ZD_ASSERT(x) do { } while (0) 45# define ZD_ASSERT(x) do { } while (0)
46#endif 46#endif
47 47
48#ifdef DEBUG
49# define ZD_MEMCLEAR(pointer, size) memset((pointer), 0xff, (size))
50#else
51# define ZD_MEMCLEAR(pointer, size) do { } while (0)
52#endif
53
48#endif /* _ZD_DEF_H */ 54#endif /* _ZD_DEF_H */
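ZD_MEMCLEAR is a debug-only poisoning aid: in DEBUG builds, torn-down state is overwritten with 0xff so a later use-after-clear reads obviously bogus values instead of plausible stale data, and in release builds it costs nothing. The zd_chip_clear()/zd_mac_clear() hunks elsewhere in this patch use it exactly this way. A small illustration with a hypothetical structure:

	struct widget {
		int state;
		void *buf;
	};

	static void widget_clear(struct widget *w)
	{
		/* ... release w->buf, detach from lists, etc. ... */
		ZD_MEMCLEAR(w, sizeof(*w));	/* 0xff poison if DEBUG */
	}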
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.h b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
index 36329890dfec..f63245b0d966 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.h
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.h
@@ -64,7 +64,7 @@ struct cck_plcp_header {
64 u8 service; 64 u8 service;
65 __le16 length; 65 __le16 length;
66 __le16 crc16; 66 __le16 crc16;
67} __attribute__((packed)); 67};
68 68
69static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header) 69static inline u8 zd_cck_plcp_header_rate(const struct cck_plcp_header *header)
70{ 70{
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 3bdc54d128d0..1989f1c05fbe 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -108,7 +108,9 @@ int zd_mac_init_hw(struct zd_mac *mac, u8 device_type)
108 if (r) 108 if (r)
109 goto disable_int; 109 goto disable_int;
110 110
111 r = zd_set_encryption_type(chip, NO_WEP); 111 /* We must inform the device that we are doing encryption/decryption in
112 * software at the moment. */
113 r = zd_set_encryption_type(chip, ENC_SNIFFER);
112 if (r) 114 if (r)
113 goto disable_int; 115 goto disable_int;
114 116
@@ -125,21 +127,17 @@ out:
125 127
126void zd_mac_clear(struct zd_mac *mac) 128void zd_mac_clear(struct zd_mac *mac)
127{ 129{
128 /* Aquire the lock. */
129 spin_lock(&mac->lock);
130 spin_unlock(&mac->lock);
131 zd_chip_clear(&mac->chip); 130 zd_chip_clear(&mac->chip);
132 memset(mac, 0, sizeof(*mac)); 131 ZD_ASSERT(!spin_is_locked(&mac->lock));
132 ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
133} 133}
134 134
135static int reset_mode(struct zd_mac *mac) 135static int reset_mode(struct zd_mac *mac)
136{ 136{
137 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); 137 struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
138 struct zd_ioreq32 ioreqs[3] = { 138 struct zd_ioreq32 ioreqs[3] = {
139 { CR_RX_FILTER, RX_FILTER_BEACON|RX_FILTER_PROBE_RESPONSE| 139 { CR_RX_FILTER, STA_RX_FILTER },
140 RX_FILTER_AUTH|RX_FILTER_ASSOC_RESPONSE },
141 { CR_SNIFFER_ON, 0U }, 140 { CR_SNIFFER_ON, 0U },
142 { CR_ENCRYPTION_TYPE, NO_WEP },
143 }; 141 };
144 142
145 if (ieee->iw_mode == IW_MODE_MONITOR) { 143 if (ieee->iw_mode == IW_MODE_MONITOR) {
@@ -713,9 +711,9 @@ static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri)
713struct zd_rt_hdr { 711struct zd_rt_hdr {
714 struct ieee80211_radiotap_header rt_hdr; 712 struct ieee80211_radiotap_header rt_hdr;
715 u8 rt_flags; 713 u8 rt_flags;
714 u8 rt_rate;
716 u16 rt_channel; 715 u16 rt_channel;
717 u16 rt_chbitmask; 716 u16 rt_chbitmask;
718 u16 rt_rate;
719}; 717};
720 718
721static void fill_rt_header(void *buffer, struct zd_mac *mac, 719static void fill_rt_header(void *buffer, struct zd_mac *mac,
@@ -735,14 +733,14 @@ static void fill_rt_header(void *buffer, struct zd_mac *mac,
735 if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256)) 733 if (status->decryption_type & (ZD_RX_WEP64|ZD_RX_WEP128|ZD_RX_WEP256))
736 hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP; 734 hdr->rt_flags |= IEEE80211_RADIOTAP_F_WEP;
737 735
736 hdr->rt_rate = stats->rate / 5;
737
738 /* FIXME: 802.11a */ 738 /* FIXME: 802.11a */
739 hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz( 739 hdr->rt_channel = cpu_to_le16(ieee80211chan2mhz(
740 _zd_chip_get_channel(&mac->chip))); 740 _zd_chip_get_channel(&mac->chip)));
741 hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ | 741 hdr->rt_chbitmask = cpu_to_le16(IEEE80211_CHAN_2GHZ |
742 ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) == 742 ((status->frame_status & ZD_RX_FRAME_MODULATION_MASK) ==
743 ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK)); 743 ZD_RX_OFDM ? IEEE80211_CHAN_OFDM : IEEE80211_CHAN_CCK));
744
745 hdr->rt_rate = stats->rate / 5;
746} 744}
747 745
748/* Returns 1 if the data packet is for us and 0 otherwise. */ 746/* Returns 1 if the data packet is for us and 0 otherwise. */
@@ -816,13 +814,25 @@ static int filter_rx(struct ieee80211_device *ieee,
816 return -EINVAL; 814 return -EINVAL;
817} 815}
818 816
819static void update_qual_rssi(struct zd_mac *mac, u8 qual_percent, u8 rssi) 817static void update_qual_rssi(struct zd_mac *mac,
818 const u8 *buffer, unsigned int length,
819 u8 qual_percent, u8 rssi_percent)
820{ 820{
821 unsigned long flags; 821 unsigned long flags;
822 struct ieee80211_hdr_3addr *hdr;
823 int i;
824
825 hdr = (struct ieee80211_hdr_3addr *)buffer;
826 if (length < offsetof(struct ieee80211_hdr_3addr, addr3))
827 return;
828 if (memcmp(hdr->addr2, zd_mac_to_ieee80211(mac)->bssid, ETH_ALEN) != 0)
829 return;
822 830
823 spin_lock_irqsave(&mac->lock, flags); 831 spin_lock_irqsave(&mac->lock, flags);
824 mac->qual_average = (7 * mac->qual_average + qual_percent) / 8; 832 i = mac->stats_count % ZD_MAC_STATS_BUFFER_SIZE;
825 mac->rssi_average = (7 * mac->rssi_average + rssi) / 8; 833 mac->qual_buffer[i] = qual_percent;
834 mac->rssi_buffer[i] = rssi_percent;
835 mac->stats_count++;
826 spin_unlock_irqrestore(&mac->lock, flags); 836 spin_unlock_irqrestore(&mac->lock, flags);
827} 837}
828 838
@@ -853,7 +863,6 @@ static int fill_rx_stats(struct ieee80211_rx_stats *stats,
853 if (stats->rate) 863 if (stats->rate)
854 stats->mask |= IEEE80211_STATMASK_RATE; 864 stats->mask |= IEEE80211_STATMASK_RATE;
855 865
856 update_qual_rssi(mac, stats->signal, stats->rssi);
857 return 0; 866 return 0;
858} 867}
859 868
@@ -877,6 +886,8 @@ int zd_mac_rx(struct zd_mac *mac, const u8 *buffer, unsigned int length)
877 sizeof(struct rx_status); 886 sizeof(struct rx_status);
878 buffer += ZD_PLCP_HEADER_SIZE; 887 buffer += ZD_PLCP_HEADER_SIZE;
879 888
889 update_qual_rssi(mac, buffer, length, stats.signal, stats.rssi);
890
880 r = filter_rx(ieee, buffer, length, &stats); 891 r = filter_rx(ieee, buffer, length, &stats);
881 if (r <= 0) 892 if (r <= 0)
882 return r; 893 return r;
@@ -981,17 +992,31 @@ struct iw_statistics *zd_mac_get_wireless_stats(struct net_device *ndev)
981{ 992{
982 struct zd_mac *mac = zd_netdev_mac(ndev); 993 struct zd_mac *mac = zd_netdev_mac(ndev);
983 struct iw_statistics *iw_stats = &mac->iw_stats; 994 struct iw_statistics *iw_stats = &mac->iw_stats;
995 unsigned int i, count, qual_total, rssi_total;
984 996
985 memset(iw_stats, 0, sizeof(struct iw_statistics)); 997 memset(iw_stats, 0, sizeof(struct iw_statistics));
986 /* We are not setting the status, because ieee->state is not updated 998 /* We are not setting the status, because ieee->state is not updated
987 * at all and this driver doesn't track authentication state. 999 * at all and this driver doesn't track authentication state.
988 */ 1000 */
989 spin_lock_irq(&mac->lock); 1001 spin_lock_irq(&mac->lock);
990 iw_stats->qual.qual = mac->qual_average; 1002 count = mac->stats_count < ZD_MAC_STATS_BUFFER_SIZE ?
991 iw_stats->qual.level = mac->rssi_average; 1003 mac->stats_count : ZD_MAC_STATS_BUFFER_SIZE;
992 iw_stats->qual.updated = IW_QUAL_QUAL_UPDATED|IW_QUAL_LEVEL_UPDATED| 1004 qual_total = rssi_total = 0;
993 IW_QUAL_NOISE_INVALID; 1005 for (i = 0; i < count; i++) {
1006 qual_total += mac->qual_buffer[i];
1007 rssi_total += mac->rssi_buffer[i];
1008 }
994 spin_unlock_irq(&mac->lock); 1009 spin_unlock_irq(&mac->lock);
1010 iw_stats->qual.updated = IW_QUAL_NOISE_INVALID;
1011 if (count > 0) {
1012 iw_stats->qual.qual = qual_total / count;
1013 iw_stats->qual.level = rssi_total / count;
1014 iw_stats->qual.updated |=
1015 IW_QUAL_QUAL_UPDATED|IW_QUAL_LEVEL_UPDATED;
1016 } else {
1017 iw_stats->qual.updated |=
1018 IW_QUAL_QUAL_INVALID|IW_QUAL_LEVEL_INVALID;
1019 }
995 /* TODO: update counter */ 1020 /* TODO: update counter */
996 return iw_stats; 1021 return iw_stats;
997} 1022}
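
The zd_rt_hdr changes above follow from the radiotap format: present fields must appear in ascending it_present bit order (FLAGS is bit 1, RATE bit 2, CHANNEL bit 3), and RATE is a single octet in units of 500 kb/s. The old struct declared rt_rate as a u16 placed after the channel words, producing a malformed radiotap frame; the fix shrinks it to u8, slots it between rt_flags and rt_channel, and fills it in the same order. The conversion in fill_rt_header(), assuming stats->rate uses the 100 kb/s PLCP convention:

	/* 100 kb/s PLCP units to radiotap 500 kb/s units (assumption above) */
	hdr->rt_rate = stats->rate / 5;	/* 11 Mb/s: 110 / 5 == 22 */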
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 71e382c589ee..29b51fd7d4e5 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -1,4 +1,4 @@
1/* zd_mac.c 1/* zd_mac.h
2 * 2 *
3 * This program is free software; you can redistribute it and/or modify 3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by 4 * it under the terms of the GNU General Public License as published by
@@ -82,18 +82,18 @@ struct zd_ctrlset {
82struct rx_length_info { 82struct rx_length_info {
83 __le16 length[3]; 83 __le16 length[3];
84 __le16 tag; 84 __le16 tag;
85} __attribute__((packed)); 85};
86 86
87#define RX_LENGTH_INFO_TAG 0x697e 87#define RX_LENGTH_INFO_TAG 0x697e
88 88
89struct rx_status { 89struct rx_status {
90 u8 signal_quality_cck;
90 /* rssi */ 91 /* rssi */
91 u8 signal_strength; 92 u8 signal_strength;
92 u8 signal_quality_cck;
93 u8 signal_quality_ofdm; 93 u8 signal_quality_ofdm;
94 u8 decryption_type; 94 u8 decryption_type;
95 u8 frame_status; 95 u8 frame_status;
96} __attribute__((packed)); 96};
97 97
98/* rx_status field decryption_type */ 98/* rx_status field decryption_type */
99#define ZD_RX_NO_WEP 0 99#define ZD_RX_NO_WEP 0
@@ -120,14 +120,17 @@ enum mac_flags {
120 MAC_FIXED_CHANNEL = 0x01, 120 MAC_FIXED_CHANNEL = 0x01,
121}; 121};
122 122
123#define ZD_MAC_STATS_BUFFER_SIZE 16
124
123struct zd_mac { 125struct zd_mac {
124 struct net_device *netdev;
125 struct zd_chip chip; 126 struct zd_chip chip;
126 spinlock_t lock; 127 spinlock_t lock;
128 struct net_device *netdev;
127 /* Unlocked reading possible */ 129 /* Unlocked reading possible */
128 struct iw_statistics iw_stats; 130 struct iw_statistics iw_stats;
129 u8 qual_average; 131 unsigned int stats_count;
130 u8 rssi_average; 132 u8 qual_buffer[ZD_MAC_STATS_BUFFER_SIZE];
133 u8 rssi_buffer[ZD_MAC_STATS_BUFFER_SIZE];
131 u8 regdomain; 134 u8 regdomain;
132 u8 default_regdomain; 135 u8 default_regdomain;
133 u8 requested_channel; 136 u8 requested_channel;
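
The qual_average/rssi_average fields above give way to 16-entry ring buffers because the old exponential average, avg = (7 * avg + sample) / 8, folded in every received frame and never expired; update_qual_rssi() now samples only frames whose addr2 matches the associated BSSID, and zd_mac_get_wireless_stats() reports a plain mean over at most the last ZD_MAC_STATS_BUFFER_SIZE samples. A condensed sketch of that windowed mean (the helper name is illustrative):

	static u8 stats_ring_mean(const u8 *buf, unsigned int stats_count)
	{
		unsigned int i, count, total = 0;

		count = stats_count < ZD_MAC_STATS_BUFFER_SIZE ?
			stats_count : ZD_MAC_STATS_BUFFER_SIZE;
		if (count == 0)
			return 0;
		for (i = 0; i < count; i++)
			total += buf[i];
		return total / count;
	}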
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c
index 9df232c2c863..440ef24b5fd1 100644
--- a/drivers/net/wireless/zd1211rw/zd_netdev.c
+++ b/drivers/net/wireless/zd1211rw/zd_netdev.c
@@ -72,10 +72,18 @@ static int iw_get_name(struct net_device *netdev,
72 struct iw_request_info *info, 72 struct iw_request_info *info,
73 union iwreq_data *req, char *extra) 73 union iwreq_data *req, char *extra)
74{ 74{
75 /* FIXME: check whether 802.11a will also supported, add also 75 /* FIXME: check whether 802.11a will also be supported */
76 * zd1211B, if we support it. 76 strlcpy(req->name, "IEEE 802.11b/g", IFNAMSIZ);
77 */ 77 return 0;
78 strlcpy(req->name, "802.11g zd1211", IFNAMSIZ); 78}
79
80static int iw_get_nick(struct net_device *netdev,
81 struct iw_request_info *info,
82 union iwreq_data *req, char *extra)
83{
84 strcpy(extra, "zd1211");
85 req->data.length = strlen(extra) + 1;
86 req->data.flags = 1;
79 return 0; 87 return 0;
80} 88}
81 89
@@ -181,6 +189,7 @@ static int iw_get_encodeext(struct net_device *netdev,
181 189
182static const iw_handler zd_standard_iw_handlers[] = { 190static const iw_handler zd_standard_iw_handlers[] = {
183 WX(SIOCGIWNAME) = iw_get_name, 191 WX(SIOCGIWNAME) = iw_get_name,
192 WX(SIOCGIWNICKN) = iw_get_nick,
184 WX(SIOCSIWFREQ) = iw_set_freq, 193 WX(SIOCSIWFREQ) = iw_set_freq,
185 WX(SIOCGIWFREQ) = iw_get_freq, 194 WX(SIOCGIWFREQ) = iw_get_freq,
186 WX(SIOCSIWMODE) = iw_set_mode, 195 WX(SIOCSIWMODE) = iw_set_mode,
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index d3770d2c61bc..f50cff3db916 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -56,7 +56,7 @@ void zd_rf_init(struct zd_rf *rf)
56 56
57void zd_rf_clear(struct zd_rf *rf) 57void zd_rf_clear(struct zd_rf *rf)
58{ 58{
59 memset(rf, 0, sizeof(*rf)); 59 ZD_MEMCLEAR(rf, sizeof(*rf));
60} 60}
61 61
62int zd_rf_init_hw(struct zd_rf *rf, u8 type) 62int zd_rf_init_hw(struct zd_rf *rf, u8 type)
@@ -76,6 +76,11 @@ int zd_rf_init_hw(struct zd_rf *rf, u8 type)
76 if (r) 76 if (r)
77 return r; 77 return r;
78 break; 78 break;
79 case AL7230B_RF:
80 r = zd_rf_init_al7230b(rf);
81 if (r)
82 return r;
83 break;
79 default: 84 default:
80 dev_err(zd_chip_dev(chip), 85 dev_err(zd_chip_dev(chip),
81 "RF %s %#x is not supported\n", zd_rf_name(type), type); 86 "RF %s %#x is not supported\n", zd_rf_name(type), type);
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index ea30f693fcc8..676b3734f1ed 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -78,5 +78,6 @@ int zd_switch_radio_off(struct zd_rf *rf);
78 78
79int zd_rf_init_rf2959(struct zd_rf *rf); 79int zd_rf_init_rf2959(struct zd_rf *rf);
80int zd_rf_init_al2230(struct zd_rf *rf); 80int zd_rf_init_al2230(struct zd_rf *rf);
81int zd_rf_init_al7230b(struct zd_rf *rf);
81 82
82#endif /* _ZD_RF_H */ 83#endif /* _ZD_RF_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index 0948b25f660d..25323a13a3db 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -21,7 +21,7 @@
21#include "zd_usb.h" 21#include "zd_usb.h"
22#include "zd_chip.h" 22#include "zd_chip.h"
23 23
24static const u32 al2230_table[][3] = { 24static const u32 zd1211_al2230_table[][3] = {
25 RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, }, 25 RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, },
26 RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, }, 26 RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, },
27 RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, }, 27 RF_CHANNEL( 3) = { 0x03e790, 0x033331, 0x00000d, },
@@ -38,6 +38,53 @@ static const u32 al2230_table[][3] = {
38 RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, }, 38 RF_CHANNEL(14) = { 0x03e7c0, 0x066661, 0x00000d, },
39}; 39};
40 40
41static const u32 zd1211b_al2230_table[][3] = {
42 RF_CHANNEL( 1) = { 0x09efc0, 0x8cccc0, 0xb00000, },
43 RF_CHANNEL( 2) = { 0x09efc0, 0x8cccd0, 0xb00000, },
44 RF_CHANNEL( 3) = { 0x09e7c0, 0x8cccc0, 0xb00000, },
45 RF_CHANNEL( 4) = { 0x09e7c0, 0x8cccd0, 0xb00000, },
46 RF_CHANNEL( 5) = { 0x05efc0, 0x8cccc0, 0xb00000, },
47 RF_CHANNEL( 6) = { 0x05efc0, 0x8cccd0, 0xb00000, },
48 RF_CHANNEL( 7) = { 0x05e7c0, 0x8cccc0, 0xb00000, },
49 RF_CHANNEL( 8) = { 0x05e7c0, 0x8cccd0, 0xb00000, },
50 RF_CHANNEL( 9) = { 0x0defc0, 0x8cccc0, 0xb00000, },
51 RF_CHANNEL(10) = { 0x0defc0, 0x8cccd0, 0xb00000, },
52 RF_CHANNEL(11) = { 0x0de7c0, 0x8cccc0, 0xb00000, },
53 RF_CHANNEL(12) = { 0x0de7c0, 0x8cccd0, 0xb00000, },
54 RF_CHANNEL(13) = { 0x03efc0, 0x8cccc0, 0xb00000, },
55 RF_CHANNEL(14) = { 0x03e7c0, 0x866660, 0xb00000, },
56};
57
58static const struct zd_ioreq16 zd1211b_ioreqs_shared_1[] = {
59 { CR240, 0x57 }, { CR9, 0xe0 },
60};
61
62static int zd1211b_al2230_finalize_rf(struct zd_chip *chip)
63{
64 int r;
65 static const struct zd_ioreq16 ioreqs[] = {
66 { CR80, 0x30 }, { CR81, 0x30 }, { CR79, 0x58 },
67 { CR12, 0xf0 }, { CR77, 0x1b }, { CR78, 0x58 },
68 { CR203, 0x06 },
69 { },
70
71 { CR240, 0x80 },
72 };
73
74 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
75 if (r)
76 return r;
77
78 /* related to antenna selection? */
79 if (chip->new_phy_layout) {
80 r = zd_iowrite16_locked(chip, 0xe1, CR9);
81 if (r)
82 return r;
83 }
84
85 return zd_iowrite16_locked(chip, 0x06, CR203);
86}
87
41static int zd1211_al2230_init_hw(struct zd_rf *rf) 88static int zd1211_al2230_init_hw(struct zd_rf *rf)
42{ 89{
43 int r; 90 int r;
@@ -139,7 +186,7 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
139 { CR47, 0x1e }, 186 { CR47, 0x1e },
140 187
141 /* ZD1211B 05.06.10 */ 188 /* ZD1211B 05.06.10 */
142 { CR48, 0x00 }, { CR49, 0x00 }, { CR51, 0x01 }, 189 { CR48, 0x06 }, { CR49, 0xf9 }, { CR51, 0x01 },
143 { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 }, 190 { CR52, 0x80 }, { CR53, 0x7e }, { CR65, 0x00 },
144 { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 }, 191 { CR66, 0x00 }, { CR67, 0x00 }, { CR68, 0x00 },
145 { CR69, 0x28 }, 192 { CR69, 0x28 },
@@ -172,79 +219,78 @@ static int zd1211b_al2230_init_hw(struct zd_rf *rf)
172 { CR137, 0x50 }, /* 5614 */ 219 { CR137, 0x50 }, /* 5614 */
173 { CR138, 0xa8 }, 220 { CR138, 0xa8 },
174 { CR144, 0xac }, /* 5621 */ 221 { CR144, 0xac }, /* 5621 */
175 { CR150, 0x0d }, { CR252, 0x00 }, { CR253, 0x00 }, 222 { CR150, 0x0d }, { CR252, 0x34 }, { CR253, 0x34 },
176 }; 223 };
177 224
178 static const u32 rv1[] = { 225 static const u32 rv1[] = {
179 /* channel 1 */ 226 0x8cccd0,
180 0x03f790, 227 0x481dc0,
181 0x033331, 228 0xcfff00,
182 0x00000d, 229 0x25a000,
183 230
184 0x0b3331, 231 /* To improve AL2230 yield, improve phase noise, 4713 */
185 0x03b812, 232 0x25a000,
186 0x00fff3, 233 0xa3b2f0,
187 0x0005a4, 234
188 0x0f4dc5, /* fix freq shift 0x044dc5 */ 235 0x6da010, /* Reg6 update for MP version */
189 0x0805b6, 236 0xe36280, /* Modified by jxiao for Bor-Chin on 2004/08/02 */
190 0x0146c7, 237 0x116000,
191 0x000688, 238 0x9dc020, /* External control TX power (CR31) */
192 0x0403b9, /* External control TX power (CR31) */ 239 0x5ddb00, /* RegA update for MP version */
193 0x00dbba, 240 0xd99000, /* RegB update for MP version */
194 0x00099b, 241 0x3ffbd0, /* RegC update for MP version */
195 0x0bdffc, 242 0xb00000, /* RegD update for MP version */
196 0x00000d, 243
197 0x00580f, 244 /* improve phase noise and remove phase calibration,4713 */
245 0xf01a00,
198 }; 246 };
199 247
200 static const struct zd_ioreq16 ioreqs2[] = { 248 static const struct zd_ioreq16 ioreqs2[] = {
201 { CR47, 0x1e }, { CR_RFCFG, 0x03 }, 249 { CR251, 0x2f }, /* shdnb(PLL_ON)=0 */
250 { CR251, 0x7f }, /* shdnb(PLL_ON)=1 */
202 }; 251 };
203 252
204 static const u32 rv2[] = { 253 static const u32 rv2[] = {
205 0x00880f, 254 /* To improve AL2230 yield, 4713 */
206 0x00080f, 255 0xf01b00,
256 0xf01e00,
257 0xf01a00,
207 }; 258 };
208 259
209 static const struct zd_ioreq16 ioreqs3[] = { 260 static const struct zd_ioreq16 ioreqs3[] = {
210 { CR_RFCFG, 0x00 }, { CR47, 0x1e }, { CR251, 0x7f }, 261 /* related to 6M band edge patching, happens unconditionally */
211 }; 262 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
212
213 static const u32 rv3[] = {
214 0x00d80f,
215 0x00780f,
216 0x00580f,
217 };
218
219 static const struct zd_ioreq16 ioreqs4[] = {
220 { CR138, 0x28 }, { CR203, 0x06 },
221 }; 263 };
222 264
265 r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1,
266 ARRAY_SIZE(zd1211b_ioreqs_shared_1));
267 if (r)
268 return r;
223 r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1)); 269 r = zd_iowrite16a_locked(chip, ioreqs1, ARRAY_SIZE(ioreqs1));
224 if (r) 270 if (r)
225 return r; 271 return r;
226 r = zd_rfwritev_locked(chip, rv1, ARRAY_SIZE(rv1), RF_RV_BITS); 272 r = zd_rfwritev_cr_locked(chip, zd1211b_al2230_table[0], 3);
227 if (r) 273 if (r)
228 return r; 274 return r;
229 r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2)); 275 r = zd_rfwritev_cr_locked(chip, rv1, ARRAY_SIZE(rv1));
230 if (r) 276 if (r)
231 return r; 277 return r;
232 r = zd_rfwritev_locked(chip, rv2, ARRAY_SIZE(rv2), RF_RV_BITS); 278 r = zd_iowrite16a_locked(chip, ioreqs2, ARRAY_SIZE(ioreqs2));
233 if (r) 279 if (r)
234 return r; 280 return r;
235 r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3)); 281 r = zd_rfwritev_cr_locked(chip, rv2, ARRAY_SIZE(rv2));
236 if (r) 282 if (r)
237 return r; 283 return r;
238 r = zd_rfwritev_locked(chip, rv3, ARRAY_SIZE(rv3), RF_RV_BITS); 284 r = zd_iowrite16a_locked(chip, ioreqs3, ARRAY_SIZE(ioreqs3));
239 if (r) 285 if (r)
240 return r; 286 return r;
241 return zd_iowrite16a_locked(chip, ioreqs4, ARRAY_SIZE(ioreqs4)); 287 return zd1211b_al2230_finalize_rf(chip);
242} 288}
243 289
244static int al2230_set_channel(struct zd_rf *rf, u8 channel) 290static int zd1211_al2230_set_channel(struct zd_rf *rf, u8 channel)
245{ 291{
246 int r; 292 int r;
247 const u32 *rv = al2230_table[channel-1]; 293 const u32 *rv = zd1211_al2230_table[channel-1];
248 struct zd_chip *chip = zd_rf_to_chip(rf); 294 struct zd_chip *chip = zd_rf_to_chip(rf);
249 static const struct zd_ioreq16 ioreqs[] = { 295 static const struct zd_ioreq16 ioreqs[] = {
250 { CR138, 0x28 }, 296 { CR138, 0x28 },
@@ -257,6 +303,24 @@ static int al2230_set_channel(struct zd_rf *rf, u8 channel)
257 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs)); 303 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
258} 304}
259 305
306static int zd1211b_al2230_set_channel(struct zd_rf *rf, u8 channel)
307{
308 int r;
309 const u32 *rv = zd1211b_al2230_table[channel-1];
310 struct zd_chip *chip = zd_rf_to_chip(rf);
311
312 r = zd_iowrite16a_locked(chip, zd1211b_ioreqs_shared_1,
313 ARRAY_SIZE(zd1211b_ioreqs_shared_1));
314 if (r)
315 return r;
316
317 r = zd_rfwritev_cr_locked(chip, rv, 3);
318 if (r)
319 return r;
320
321 return zd1211b_al2230_finalize_rf(chip);
322}
323
260static int zd1211_al2230_switch_radio_on(struct zd_rf *rf) 324static int zd1211_al2230_switch_radio_on(struct zd_rf *rf)
261{ 325{
262 struct zd_chip *chip = zd_rf_to_chip(rf); 326 struct zd_chip *chip = zd_rf_to_chip(rf);
@@ -294,13 +358,14 @@ int zd_rf_init_al2230(struct zd_rf *rf)
294{ 358{
295 struct zd_chip *chip = zd_rf_to_chip(rf); 359 struct zd_chip *chip = zd_rf_to_chip(rf);
296 360
297 rf->set_channel = al2230_set_channel;
298 rf->switch_radio_off = al2230_switch_radio_off; 361 rf->switch_radio_off = al2230_switch_radio_off;
299 if (chip->is_zd1211b) { 362 if (chip->is_zd1211b) {
300 rf->init_hw = zd1211b_al2230_init_hw; 363 rf->init_hw = zd1211b_al2230_init_hw;
364 rf->set_channel = zd1211b_al2230_set_channel;
301 rf->switch_radio_on = zd1211b_al2230_switch_radio_on; 365 rf->switch_radio_on = zd1211b_al2230_switch_radio_on;
302 } else { 366 } else {
303 rf->init_hw = zd1211_al2230_init_hw; 367 rf->init_hw = zd1211_al2230_init_hw;
368 rf->set_channel = zd1211_al2230_set_channel;
304 rf->switch_radio_on = zd1211_al2230_switch_radio_on; 369 rf->switch_radio_on = zd1211_al2230_switch_radio_on;
305 } 370 }
306 rf->patch_6m_band_edge = 1; 371 rf->patch_6m_band_edge = 1;
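
The rewritten ZD1211B path above replaces the bit-banged zd_rfwritev_locked(..., RF_RV_BITS) writes with zd_rfwritev_cr_locked(), whose definition is outside this diff. A hedged sketch, assuming it simply iterates the single-word zd_rfwrite_cr_locked() (used directly by zd_rf_al7230b.c below) over an array of RF words:

	static int zd_rfwritev_cr_locked(struct zd_chip *chip,
					 const u32 *values, unsigned int count)
	{
		unsigned int i;
		int r;

		for (i = 0; i < count; i++) {
			r = zd_rfwrite_cr_locked(chip, values[i]);
			if (r)
				return r;
		}
		return 0;
	}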
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
new file mode 100644
index 000000000000..a289f95187ec
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -0,0 +1,274 @@
1/* zd_rf_al7230b.c: Functions for the AL7230B RF controller
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#include <linux/kernel.h>
19
20#include "zd_rf.h"
21#include "zd_usb.h"
22#include "zd_chip.h"
23
24static const u32 chan_rv[][2] = {
25 RF_CHANNEL( 1) = { 0x09ec00, 0x8cccc8 },
26 RF_CHANNEL( 2) = { 0x09ec00, 0x8cccd8 },
27 RF_CHANNEL( 3) = { 0x09ec00, 0x8cccc0 },
28 RF_CHANNEL( 4) = { 0x09ec00, 0x8cccd0 },
29 RF_CHANNEL( 5) = { 0x05ec00, 0x8cccc8 },
30 RF_CHANNEL( 6) = { 0x05ec00, 0x8cccd8 },
31 RF_CHANNEL( 7) = { 0x05ec00, 0x8cccc0 },
32 RF_CHANNEL( 8) = { 0x05ec00, 0x8cccd0 },
33 RF_CHANNEL( 9) = { 0x0dec00, 0x8cccc8 },
34 RF_CHANNEL(10) = { 0x0dec00, 0x8cccd8 },
35 RF_CHANNEL(11) = { 0x0dec00, 0x8cccc0 },
36 RF_CHANNEL(12) = { 0x0dec00, 0x8cccd0 },
37 RF_CHANNEL(13) = { 0x03ec00, 0x8cccc8 },
38 RF_CHANNEL(14) = { 0x03ec00, 0x866660 },
39};
40
41static const u32 std_rv[] = {
42 0x4ff821,
43 0xc5fbfc,
44 0x21ebfe,
45 0xafd401, /* freq shift 0xaad401 */
46 0x6cf56a,
47 0xe04073,
48 0x193d76,
49 0x9dd844,
50 0x500007,
51 0xd8c010,
52};
53
54static int al7230b_init_hw(struct zd_rf *rf)
55{
56 int i, r;
57 struct zd_chip *chip = zd_rf_to_chip(rf);
58
59 /* All of these writes are identical to AL2230 unless otherwise
60 * specified */
61 static const struct zd_ioreq16 ioreqs_1[] = {
62 /* This one is 7230-specific, and happens before the rest */
63 { CR240, 0x57 },
64 { },
65
66 { CR15, 0x20 }, { CR23, 0x40 }, { CR24, 0x20 },
67 { CR26, 0x11 }, { CR28, 0x3e }, { CR29, 0x00 },
68 { CR44, 0x33 },
69 /* This value is different for 7230 (was: 0x2a) */
70 { CR106, 0x22 },
71 { CR107, 0x1a }, { CR109, 0x09 }, { CR110, 0x27 },
72 { CR111, 0x2b }, { CR112, 0x2b }, { CR119, 0x0a },
73 /* This happened further down in AL2230,
74 * and the value changed (was: 0xe0) */
75 { CR122, 0xfc },
76 { CR10, 0x89 },
77 /* for newest (3rd cut) AL2300 */
78 { CR17, 0x28 },
79 { CR26, 0x93 }, { CR34, 0x30 },
80 /* for newest (3rd cut) AL2300 */
81 { CR35, 0x3e },
82 { CR41, 0x24 }, { CR44, 0x32 },
83 /* for newest (3rd cut) AL2300 */
84 { CR46, 0x96 },
85 { CR47, 0x1e }, { CR79, 0x58 }, { CR80, 0x30 },
86 { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 },
87 { CR92, 0x0a }, { CR99, 0x28 },
88 /* This value is different for 7230 (was: 0x00) */
89 { CR100, 0x02 },
90 { CR101, 0x13 }, { CR102, 0x27 },
91 /* This value is different for 7230 (was: 0x24) */
92 { CR106, 0x22 },
93 /* This value is different for 7230 (was: 0x2a) */
94 { CR107, 0x3f },
95 { CR109, 0x09 },
96 /* This value is different for 7230 (was: 0x13) */
97 { CR110, 0x1f },
98 { CR111, 0x1f }, { CR112, 0x1f }, { CR113, 0x27 },
99 { CR114, 0x27 },
100 /* for newest (3rd cut) AL2300 */
101 { CR115, 0x24 },
102 /* This value is different for 7230 (was: 0x24) */
103 { CR116, 0x3f },
104 /* This value is different for 7230 (was: 0xf4) */
105 { CR117, 0xfa },
106 { CR118, 0xfc }, { CR119, 0x10 }, { CR120, 0x4f },
107 { CR121, 0x77 }, { CR137, 0x88 },
108 /* This one is 7230-specific */
109 { CR138, 0xa8 },
110 /* This value is different for 7230 (was: 0xff) */
111 { CR252, 0x34 },
112 /* This value is different for 7230 (was: 0xff) */
113 { CR253, 0x34 },
114
115 /* PLL_OFF */
116 { CR251, 0x2f },
117 };
118
119 static const struct zd_ioreq16 ioreqs_2[] = {
120 /* PLL_ON */
121 { CR251, 0x3f },
122 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
123 { CR38, 0x38 }, { CR136, 0xdf },
124 };
125
126 r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1));
127 if (r)
128 return r;
129
130 r = zd_rfwrite_cr_locked(chip, 0x09ec04);
131 if (r)
132 return r;
133 r = zd_rfwrite_cr_locked(chip, 0x8cccc8);
134 if (r)
135 return r;
136
137 for (i = 0; i < ARRAY_SIZE(std_rv); i++) {
138 r = zd_rfwrite_cr_locked(chip, std_rv[i]);
139 if (r)
140 return r;
141 }
142
143 r = zd_rfwrite_cr_locked(chip, 0x3c9000);
144 if (r)
145 return r;
146 r = zd_rfwrite_cr_locked(chip, 0xbfffff);
147 if (r)
148 return r;
149 r = zd_rfwrite_cr_locked(chip, 0x700000);
150 if (r)
151 return r;
152 r = zd_rfwrite_cr_locked(chip, 0xf15d58);
153 if (r)
154 return r;
155
156 r = zd_iowrite16a_locked(chip, ioreqs_2, ARRAY_SIZE(ioreqs_2));
157 if (r)
158 return r;
159
160 r = zd_rfwrite_cr_locked(chip, 0xf15d59);
161 if (r)
162 return r;
163 r = zd_rfwrite_cr_locked(chip, 0xf15d5c);
164 if (r)
165 return r;
166 r = zd_rfwrite_cr_locked(chip, 0xf15d58);
167 if (r)
168 return r;
169
170 r = zd_iowrite16_locked(chip, 0x06, CR203);
171 if (r)
172 return r;
173 r = zd_iowrite16_locked(chip, 0x80, CR240);
174 if (r)
175 return r;
176
177 return 0;
178}
179
180static int al7230b_set_channel(struct zd_rf *rf, u8 channel)
181{
182 int i, r;
183 const u32 *rv = chan_rv[channel-1];
184 struct zd_chip *chip = zd_rf_to_chip(rf);
185
186 struct zd_ioreq16 ioreqs_1[] = {
187 { CR128, 0x14 }, { CR129, 0x12 }, { CR130, 0x10 },
188 { CR38, 0x38 }, { CR136, 0xdf },
189 };
190
191 struct zd_ioreq16 ioreqs_2[] = {
192 /* PLL_ON */
193 { CR251, 0x3f },
194 { CR203, 0x06 }, { CR240, 0x08 },
195 };
196
197 r = zd_iowrite16_locked(chip, 0x57, CR240);
198 if (r)
199 return r;
200
201 /* PLL_OFF */
202 r = zd_iowrite16_locked(chip, 0x2f, CR251);
203 if (r)
204 return r;
205
206 for (i = 0; i < ARRAY_SIZE(std_rv); i++) {
207 r = zd_rfwrite_cr_locked(chip, std_rv[i]);
208 if (r)
209 return r;
210 }
211
212 r = zd_rfwrite_cr_locked(chip, 0x3c9000);
213 if (r)
214 return r;
215 r = zd_rfwrite_cr_locked(chip, 0xf15d58);
216 if (r)
217 return r;
218
219 r = zd_iowrite16a_locked(chip, ioreqs_1, ARRAY_SIZE(ioreqs_1));
220 if (r)
221 return r;
222
223 for (i = 0; i < 2; i++) {
224 r = zd_rfwrite_cr_locked(chip, rv[i]);
225 if (r)
226 return r;
227 }
228
229 r = zd_rfwrite_cr_locked(chip, 0x3c9000);
230 if (r)
231 return r;
232
233 return zd_iowrite16a_locked(chip, ioreqs_2, ARRAY_SIZE(ioreqs_2));
234}
235
236static int al7230b_switch_radio_on(struct zd_rf *rf)
237{
238 struct zd_chip *chip = zd_rf_to_chip(rf);
239 static const struct zd_ioreq16 ioreqs[] = {
240 { CR11, 0x00 },
241 { CR251, 0x3f },
242 };
243
244 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
245}
246
247static int al7230b_switch_radio_off(struct zd_rf *rf)
248{
249 struct zd_chip *chip = zd_rf_to_chip(rf);
250 static const struct zd_ioreq16 ioreqs[] = {
251 { CR11, 0x04 },
252 { CR251, 0x2f },
253 };
254
255 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
256}
257
258int zd_rf_init_al7230b(struct zd_rf *rf)
259{
260 struct zd_chip *chip = zd_rf_to_chip(rf);
261
262 if (chip->is_zd1211b) {
263 dev_err(zd_chip_dev(chip), "AL7230B is currently not "
264 "supported for ZD1211B devices\n");
265 return -ENODEV;
266 }
267
268 rf->init_hw = al7230b_init_hw;
269 rf->set_channel = al7230b_set_channel;
270 rf->switch_radio_on = al7230b_switch_radio_on;
271 rf->switch_radio_off = al7230b_switch_radio_off;
272 rf->patch_6m_band_edge = 1;
273 return 0;
274}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 72f90525bf68..31027e52b04b 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include <asm/unaligned.h> 18#include <asm/unaligned.h>
19#include <linux/kernel.h>
19#include <linux/init.h> 20#include <linux/init.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/firmware.h> 22#include <linux/firmware.h>
@@ -39,9 +40,19 @@ static struct usb_device_id usb_ids[] = {
39 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 40 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
40 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, 41 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
41 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 42 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
43 { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 },
44 { USB_DEVICE(0x1740, 0x2000), .driver_info = DEVICE_ZD1211 },
45 { USB_DEVICE(0x157e, 0x3204), .driver_info = DEVICE_ZD1211 },
46 { USB_DEVICE(0x0586, 0x3402), .driver_info = DEVICE_ZD1211 },
47 { USB_DEVICE(0x0b3b, 0x5630), .driver_info = DEVICE_ZD1211 },
48 { USB_DEVICE(0x0b05, 0x170c), .driver_info = DEVICE_ZD1211 },
42 /* ZD1211B */ 49 /* ZD1211B */
43 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, 50 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
44 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 51 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
52 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
53 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
54 /* "Driverless" devices that need ejecting */
55 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
45 {} 56 {}
46}; 57};
47 58
@@ -263,6 +274,39 @@ static char *get_fw_name(char *buffer, size_t size, u8 device_type,
263 return buffer; 274 return buffer;
264} 275}
265 276
277static int handle_version_mismatch(struct usb_device *udev, u8 device_type,
278 const struct firmware *ub_fw)
279{
280 const struct firmware *ur_fw = NULL;
281 int offset;
282 int r = 0;
283 char fw_name[128];
284
285 r = request_fw_file(&ur_fw,
286 get_fw_name(fw_name, sizeof(fw_name), device_type, "ur"),
287 &udev->dev);
288 if (r)
289 goto error;
290
291 r = upload_code(udev, ur_fw->data, ur_fw->size, FW_START_OFFSET,
292 REBOOT);
293 if (r)
294 goto error;
295
296 offset = ((EEPROM_REGS_OFFSET + EEPROM_REGS_SIZE) * sizeof(u16));
297 r = upload_code(udev, ub_fw->data + offset, ub_fw->size - offset,
298 E2P_BASE_OFFSET + EEPROM_REGS_SIZE, REBOOT);
299
300 /* At this point, the vendor driver downloads the whole firmware
301 * image, hacks around with version IDs, and uploads it again,
302 * completely overwriting the boot code. We do not do this here as
303 * it is not required on any tested devices, and it is suspected to
304 * cause problems. */
305error:
306 release_firmware(ur_fw);
307 return r;
308}
309
266static int upload_firmware(struct usb_device *udev, u8 device_type) 310static int upload_firmware(struct usb_device *udev, u8 device_type)
267{ 311{
268 int r; 312 int r;
@@ -282,15 +326,17 @@ static int upload_firmware(struct usb_device *udev, u8 device_type)
282 326
283 fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET); 327 fw_bcdDevice = get_word(ub_fw->data, EEPROM_REGS_OFFSET);
284 328
285 /* FIXME: do we have any reason to perform the kludge that the vendor
286 * driver does when there is a version mismatch? (their driver uploads
287 * different firmwares and stuff)
288 */
289 if (fw_bcdDevice != bcdDevice) { 329 if (fw_bcdDevice != bcdDevice) {
290 dev_info(&udev->dev, 330 dev_info(&udev->dev,
291 "firmware device id %#06x and actual device id " 331 "firmware version %#06x and device bootcode version "
292 "%#06x differ, continuing anyway\n", 332 "%#06x differ\n", fw_bcdDevice, bcdDevice);
293 fw_bcdDevice, bcdDevice); 333 if (bcdDevice <= 0x4313)
334 dev_warn(&udev->dev, "device has old bootcode, please "
335 "report success or failure\n");
336
337 r = handle_version_mismatch(udev, device_type, ub_fw);
338 if (r)
339 goto error;
294 } else { 340 } else {
295 dev_dbg_f(&udev->dev, 341 dev_dbg_f(&udev->dev,
296 "firmware device id %#06x is equal to the " 342 "firmware device id %#06x is equal to the "
@@ -323,7 +369,6 @@ static void disable_read_regs_int(struct zd_usb *usb)
323{ 369{
324 struct zd_usb_interrupt *intr = &usb->intr; 370 struct zd_usb_interrupt *intr = &usb->intr;
325 371
326 ZD_ASSERT(in_interrupt());
327 spin_lock(&intr->lock); 372 spin_lock(&intr->lock);
328 intr->read_regs_enabled = 0; 373 intr->read_regs_enabled = 0;
329 spin_unlock(&intr->lock); 374 spin_unlock(&intr->lock);
@@ -545,11 +590,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
545 * be padded. Unaligned access might also happen if the length_info 590 * be padded. Unaligned access might also happen if the length_info
546 * structure is not present. 591 * structure is not present.
547 */ 592 */
548 if (get_unaligned(&length_info->tag) == RX_LENGTH_INFO_TAG) { 593 if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG))
594 {
549 unsigned int l, k, n; 595 unsigned int l, k, n;
550 for (i = 0, l = 0;; i++) { 596 for (i = 0, l = 0;; i++) {
551 k = le16_to_cpu(get_unaligned( 597 k = le16_to_cpu(get_unaligned(&length_info->length[i]));
552 &length_info->length[i]));
553 n = l+k; 598 n = l+k;
554 if (n > length) 599 if (n > length)
555 return; 600 return;
@@ -621,7 +666,7 @@ resubmit:
621 usb_submit_urb(urb, GFP_ATOMIC); 666 usb_submit_urb(urb, GFP_ATOMIC);
622} 667}
623 668
624struct urb *alloc_urb(struct zd_usb *usb) 669static struct urb *alloc_urb(struct zd_usb *usb)
625{ 670{
626 struct usb_device *udev = zd_usb_to_usbdev(usb); 671 struct usb_device *udev = zd_usb_to_usbdev(usb);
627 struct urb *urb; 672 struct urb *urb;
@@ -645,7 +690,7 @@ struct urb *alloc_urb(struct zd_usb *usb)
645 return urb; 690 return urb;
646} 691}
647 692
648void free_urb(struct urb *urb) 693static void free_urb(struct urb *urb)
649{ 694{
650 if (!urb) 695 if (!urb)
651 return; 696 return;
@@ -865,7 +910,7 @@ void zd_usb_clear(struct zd_usb *usb)
865{ 910{
866 usb_set_intfdata(usb->intf, NULL); 911 usb_set_intfdata(usb->intf, NULL);
867 usb_put_intf(usb->intf); 912 usb_put_intf(usb->intf);
868 memset(usb, 0, sizeof(*usb)); 913 ZD_MEMCLEAR(usb, sizeof(*usb));
869 /* FIXME: usb_interrupt, usb_tx, usb_rx? */ 914 /* FIXME: usb_interrupt, usb_tx, usb_rx? */
870} 915}
871 916
@@ -911,6 +956,55 @@ static void print_id(struct usb_device *udev)
911#define print_id(udev) do { } while (0) 956#define print_id(udev) do { } while (0)
912#endif 957#endif
913 958
959static int eject_installer(struct usb_interface *intf)
960{
961 struct usb_device *udev = interface_to_usbdev(intf);
962 struct usb_host_interface *iface_desc = &intf->altsetting[0];
963 struct usb_endpoint_descriptor *endpoint;
964 unsigned char *cmd;
965 u8 bulk_out_ep;
966 int r;
967
968 /* Find bulk out endpoint */
969 endpoint = &iface_desc->endpoint[1].desc;
970 if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
971 (endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
972 USB_ENDPOINT_XFER_BULK) {
973 bulk_out_ep = endpoint->bEndpointAddress;
974 } else {
975 dev_err(&udev->dev,
976 "zd1211rw: Could not find bulk out endpoint\n");
977 return -ENODEV;
978 }
979
980 cmd = kzalloc(31, GFP_KERNEL);
981 if (cmd == NULL)
982 return -ENODEV;
983
984 /* USB bulk command block */
985 cmd[0] = 0x55; /* bulk command signature */
986 cmd[1] = 0x53; /* bulk command signature */
987 cmd[2] = 0x42; /* bulk command signature */
988 cmd[3] = 0x43; /* bulk command signature */
989 cmd[14] = 6; /* command length */
990
991 cmd[15] = 0x1b; /* SCSI command: START STOP UNIT */
992 cmd[19] = 0x2; /* eject disc */
993
994 dev_info(&udev->dev, "Ejecting virtual installer media...\n");
995 r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, bulk_out_ep),
996 cmd, 31, NULL, 2000);
997 kfree(cmd);
998 if (r)
999 return r;
1000
1001 /* At this point, the device disconnects and reconnects with the real
1002 * ID numbers. */
1003
1004 usb_set_intfdata(intf, NULL);
1005 return 0;
1006}
1007
914static int probe(struct usb_interface *intf, const struct usb_device_id *id) 1008static int probe(struct usb_interface *intf, const struct usb_device_id *id)
915{ 1009{
916 int r; 1010 int r;
@@ -919,6 +1013,9 @@ static int probe(struct usb_interface *intf, const struct usb_device_id *id)
919 1013
920 print_id(udev); 1014 print_id(udev);
921 1015
1016 if (id->driver_info & DEVICE_INSTALLER)
1017 return eject_installer(intf);
1018
922 switch (udev->speed) { 1019 switch (udev->speed) {
923 case USB_SPEED_LOW: 1020 case USB_SPEED_LOW:
924 case USB_SPEED_FULL: 1021 case USB_SPEED_FULL:
@@ -984,6 +1081,11 @@ static void disconnect(struct usb_interface *intf)
984 struct zd_mac *mac = zd_netdev_mac(netdev); 1081 struct zd_mac *mac = zd_netdev_mac(netdev);
985 struct zd_usb *usb = &mac->chip.usb; 1082 struct zd_usb *usb = &mac->chip.usb;
986 1083
1084 /* Either something really bad happened, or we're just dealing with
1085 * a DEVICE_INSTALLER. */
1086 if (netdev == NULL)
1087 return;
1088
987 dev_dbg_f(zd_usb_dev(usb), "\n"); 1089 dev_dbg_f(zd_usb_dev(usb), "\n");
988 1090
989 zd_netdev_disconnect(netdev); 1091 zd_netdev_disconnect(netdev);
@@ -999,7 +1101,6 @@ static void disconnect(struct usb_interface *intf)
999 */ 1101 */
1000 usb_reset_device(interface_to_usbdev(intf)); 1102 usb_reset_device(interface_to_usbdev(intf));
1001 1103
1002 /* If somebody still waits on this lock now, this is an error. */
1003 zd_netdev_free(netdev); 1104 zd_netdev_free(netdev);
1004 dev_dbg(&intf->dev, "disconnected\n"); 1105 dev_dbg(&intf->dev, "disconnected\n");
1005} 1106}
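
The 31-byte buffer built in eject_installer() above is a USB mass-storage Bulk-Only Transport Command Block Wrapper carrying a SCSI START STOP UNIT command; these "driverless" devices enumerate as a fake CD-ROM of Windows drivers until that medium is ejected, after which they reconnect with their real IDs. An annotated view of the same bytes (the struct and field names merely mirror the mass-storage spec for illustration):

	struct bulk_cb_wrap_sketch {
		__le32 Signature;		/* cmd[0..3]: 'U','S','B','C' */
		__u32  Tag;			/* cmd[4..7]: 0, echoed in status */
		__le32 DataTransferLength;	/* cmd[8..11]: 0, no data stage */
		__u8   Flags;			/* cmd[12]: 0 */
		__u8   Lun;			/* cmd[13]: 0 */
		__u8   Length;			/* cmd[14]: 6, length of the CDB */
		__u8   CDB[16];			/* cmd[15]: 0x1b START STOP UNIT;
						 * cmd[19]: CDB byte 4, LOEJ bit
						 * set and START clear: eject */
	};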
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index d6420283bd5a..ded39de5f72d 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -30,6 +30,7 @@
30enum devicetype { 30enum devicetype {
31 DEVICE_ZD1211 = 0, 31 DEVICE_ZD1211 = 0,
32 DEVICE_ZD1211B = 1, 32 DEVICE_ZD1211B = 1,
33 DEVICE_INSTALLER = 2,
33}; 34};
34 35
35enum endpoints { 36enum endpoints {
@@ -73,17 +74,17 @@ enum control_requests {
73struct usb_req_read_regs { 74struct usb_req_read_regs {
74 __le16 id; 75 __le16 id;
75 __le16 addr[0]; 76 __le16 addr[0];
76} __attribute__((packed)); 77};
77 78
78struct reg_data { 79struct reg_data {
79 __le16 addr; 80 __le16 addr;
80 __le16 value; 81 __le16 value;
81} __attribute__((packed)); 82};
82 83
83struct usb_req_write_regs { 84struct usb_req_write_regs {
84 __le16 id; 85 __le16 id;
85 struct reg_data reg_writes[0]; 86 struct reg_data reg_writes[0];
86} __attribute__((packed)); 87};
87 88
88enum { 89enum {
89 RF_IF_LE = 0x02, 90 RF_IF_LE = 0x02,
@@ -100,7 +101,7 @@ struct usb_req_rfwrite {
100 /* RF2595: 24 */ 101 /* RF2595: 24 */
101 __le16 bit_values[0]; 102 __le16 bit_values[0];
102 /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */ 103 /* (CR203 & ~(RF_IF_LE | RF_CLK | RF_DATA)) | (bit ? RF_DATA : 0) */
103} __attribute__((packed)); 104};
104 105
105/* USB interrupt */ 106/* USB interrupt */
106 107
@@ -117,12 +118,12 @@ enum usb_int_flags {
117struct usb_int_header { 118struct usb_int_header {
118 u8 type; /* must always be 1 */ 119 u8 type; /* must always be 1 */
119 u8 id; 120 u8 id;
120} __attribute__((packed)); 121};
121 122
122struct usb_int_regs { 123struct usb_int_regs {
123 struct usb_int_header hdr; 124 struct usb_int_header hdr;
124 struct reg_data regs[0]; 125 struct reg_data regs[0];
125} __attribute__((packed)); 126};
126 127
127struct usb_int_retry_fail { 128struct usb_int_retry_fail {
128 struct usb_int_header hdr; 129 struct usb_int_header hdr;
@@ -130,7 +131,7 @@ struct usb_int_retry_fail {
130 u8 _dummy; 131 u8 _dummy;
131 u8 addr[ETH_ALEN]; 132 u8 addr[ETH_ALEN];
132 u8 ibss_wakeup_dest; 133 u8 ibss_wakeup_dest;
133} __attribute__((packed)); 134};
134 135
135struct read_regs_int { 136struct read_regs_int {
136 struct completion completion; 137 struct completion completion;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 8459a18254a4..8746cc7c7088 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -24,8 +24,8 @@
24*/ 24*/
25 25
26#define DRV_NAME "yellowfin" 26#define DRV_NAME "yellowfin"
27#define DRV_VERSION "2.0" 27#define DRV_VERSION "2.1"
28#define DRV_RELDATE "Jun 27, 2006" 28#define DRV_RELDATE "Sep 11, 2006"
29 29
30#define PFX DRV_NAME ": " 30#define PFX DRV_NAME ": "
31 31
@@ -1307,8 +1307,6 @@ static void set_rx_mode(struct net_device *dev)
1307 /* Stop the Rx process to change any value. */ 1307 /* Stop the Rx process to change any value. */
1308 iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg); 1308 iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
1309 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1309 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1310 /* Unconditionally log net taps. */
1311 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1312 iowrite16(0x000F, ioaddr + AddrMode); 1310 iowrite16(0x000F, ioaddr + AddrMode);
1313 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) { 1311 } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
1314 /* Too many to filter well, or accept all multicasts. */ 1312 /* Too many to filter well, or accept all multicasts. */
@@ -1434,7 +1432,7 @@ static int __init yellowfin_init (void)
1434#ifdef MODULE 1432#ifdef MODULE
1435 printk(version); 1433 printk(version);
1436#endif 1434#endif
1437 return pci_module_init (&yellowfin_driver); 1435 return pci_register_driver(&yellowfin_driver);
1438} 1436}
1439 1437
1440 1438